source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
0fa8731d37e95224dbb06693b0c5599e66d59f4c.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
} ;
/*
 * Generated boundary-padding kernel: fills the absorbing-boundary (ABC)
 * layers of the 3-D velocity model `vp` by replicating the nearest interior
 * plane outward along each axis, offloading every sweep to the accelerator
 * with OpenMP target constructs.
 *
 * Parameters:
 *   vp_vec                     - wrapped 3-D float array (data + per-axis sizes)
 *   x_m..z_m / x_M..z_M        - inclusive interior loop bounds per axis
 *   abc_*_ltkn / abc_*_rtkn    - left/right ABC layer thickness per axis
 *   timers                     - wall-clock accumulator for section0
 *
 * NOTE(review): the constant 12 appears to be the array halo/padding width,
 * and 22 (= 12 + 10) / the "+ 2" offsets the first interior plane for the
 * hard-coded layer thickness — TODO confirm against the code generator.
 *
 * Returns 0 unconditionally.
 */
int padfunc(struct dataobj *restrict vp_vec, const int x_M, const int y_M, const int z_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int z_m)
{
/* View the flat data pointer as a variably-sized 3-D array. */
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
/* Copy vp to the device for the lifetime of the padding sweeps. */
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
/* Left x-boundary: copy the first interior x-plane into each ABC layer. */
for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
{
#pragma omp target teams distribute parallel for collapse(2)
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[abc_x_l + 12][y + 12][z + 12] = vp[22][y + 12][z + 12];
}
}
}
/* Right x-boundary: copy the last interior x-plane outward. */
for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
{
#pragma omp target teams distribute parallel for collapse(2)
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[abc_x_r + 12][y + 12][z + 12] = vp[x_M + 2][y + 12][z + 12];
}
}
}
/* y- and z-boundaries, fused into a single sweep over interior x. */
#pragma omp target teams distribute parallel for collapse(1)
for (int x = x_m; x <= x_M; x += 1)
{
for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[x + 12][abc_y_l + 12][z + 12] = vp[x + 12][22][z + 12];
}
}
for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[x + 12][abc_y_r + 12][z + 12] = vp[x + 12][y_M + 2][z + 12];
}
}
for (int y = y_m; y <= y_M; y += 1)
{
for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
{
vp[x + 12][y + 12][abc_z_l + 12] = vp[x + 12][y + 12][22];
}
for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
{
vp[x + 12][y + 12][abc_z_r + 12] = vp[x + 12][y + 12][z_M + 2];
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
/* Accumulate elapsed wall-clock time (seconds) into the profiler. */
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
/* Copy the padded array back to the host, then drop the device mapping. */
#pragma omp target update from(vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
#pragma omp target exit data map(release: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
return 0;
}
|
vmult_2_save.c | #define _TH_1 2
#include <omp.h>
/*
 * Sparse matrix-vector accumulate in CSR layout: dst[row] += sum over the
 * row's entries of val[k] * src[col[k]], for rows 0..n_rows-1, parallelized
 * over rows with OpenMP. rowstart has n_rows+1 entries delimiting each row.
 * M, N, alpha, incX, beta, incY are accepted for BLAS-style interface
 * compatibility but are not used by this kernel.
 */
void vmult_2(const int M,const int N,const double alpha,const double* val,const int n_rows,const double* src,const int incX,const double beta,double* dst,const int incY,int* rowstart,int* col) {
  int row;
#ifdef _OPENMP
  /* Only touch the OpenMP runtime when compiled with OpenMP support, so
     the function also builds and runs in plain (sequential) builds. */
  omp_set_num_threads(_TH_1);
#endif
#pragma omp parallel
  {
    /*@;BEGIN(Nest1=Nest)@*/
#pragma omp for private(row)
    for (row = 0; row < n_rows; row += 1)
    {
      /* BUG FIX: the row pointers previously discarded the const qualifier
         of `val` (double* = const double*), an incompatible-pointer-type
         constraint violation rejected by modern compilers. */
      const double *val_ptr = val + rowstart[row];
      const int *col_ptr = col + rowstart[row];
      const double *val_end_of_row = val + rowstart[row + 1];
      double s = dst[row];
      while (val_ptr != val_end_of_row)
      {
        s = s + *val_ptr++ * src[*col_ptr++];
      }
      dst[row] = s;
    }
  }
}
|
VariableSubSampler.h | /**
* @file
* This file is part of SeisSol.
*
* @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger)
*
* @section LICENSE
* Copyright (c) 2015, SeisSol Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
*/
#ifndef VARIABLE_SUBSAMPLER_H_
#define VARIABLE_SUBSAMPLER_H_
#include <cassert>
#include <algorithm>
#include <vector>
#include <glm/vec3.hpp>
#include "Geometry/MeshReader.h"
#include "Numerical_aux/BasisFunction.h"
#include "RefinerUtils.h"
namespace seissol
{
namespace refinement
{
//------------------------------------------------------------------------------
/**
 * Samples one output value per refined subcell from the DOFs of each
 * original mesh cell, using basis functions evaluated at the subcell
 * centers produced by a TetrahedronRefiner.
 */
template<class T>
class VariableSubsampler
{
private:
/** One sampled basis function per subcell center (size kSubCellsPerCell). */
std::vector<basisFunction::SampledBasisFunctions<T> > m_BasisFunctions;
/** The original number of cells (without refinement) */
const unsigned int m_numCells;
// Number of subcells each original cell is divided into.
const unsigned int kSubCellsPerCell;
// Number of variables stored per cell in the input data.
const unsigned int kNumVariables;
// Aligned number of degrees of freedom per variable.
const unsigned int kNumAlignedDOF;
/** Offset of (cell, variable) in the input DOF array, after cellMap
 *  translation from output cell index to input cell index. */
std::size_t getInVarOffset(unsigned int cell, unsigned int variable,
const unsigned int* cellMap) const
{
return (cellMap[cell]*kNumVariables + variable) * kNumAlignedDOF;
}
/** Offset of (cell, subcell) in the refined output array. */
std::size_t getOutVarOffset(unsigned cell, unsigned int subcell) const
{
return kSubCellsPerCell * cell + subcell;
}
public:
VariableSubsampler(
unsigned int numCells,
const TetrahedronRefiner<T>& tetRefiner,
unsigned int order,
unsigned int numVariables,
unsigned int numAlignedDOF
);
/** Evaluates `variable` for every (cell, subcell) pair into outData. */
void get(const double* inData, const unsigned int* cellMap,
int variable, double* outData) const;
};
//------------------------------------------------------------------------------
/**
 * Refines the unit tetrahedron once and samples a basis function of the
 * given order at each resulting subcell center.
 *
 * IMPROVEMENT: the scratch arrays previously used raw new[]/delete[], which
 * leaked if push_back (or SampledBasisFunctions's constructor) threw.
 * std::vector gives the same contiguous storage with RAII cleanup.
 */
template<typename T>
VariableSubsampler<T>::VariableSubsampler(
    unsigned int numCells,
    const TetrahedronRefiner<T>& tetRefiner,
    unsigned int order,
    unsigned int numVariables,
    unsigned int numAlignedDOF)
    : m_numCells(numCells),
    kSubCellsPerCell(tetRefiner.getDivisionCount()),
    kNumVariables(numVariables), kNumAlignedDOF(numAlignedDOF)
{
    // Generate cell centerpoints in the reference or unit tetrahedron.
    std::vector<Tetrahedron<T> > subCells(kSubCellsPerCell);
    std::vector<glm::tvec3<T> > additionalVertices(tetRefiner.additionalVerticesPerCell());
    tetRefiner.refine(Tetrahedron<T>::unitTetrahedron(), 0,
        subCells.data(), additionalVertices.data());
    // Generate sampled basis functions, one per subcell center.
    m_BasisFunctions.reserve(kSubCellsPerCell);
    for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
        const glm::tvec3<T> pnt = subCells[i].center();
        m_BasisFunctions.push_back(
            basisFunction::SampledBasisFunctions<T>(
                order, pnt.x, pnt.y, pnt.z));
    }
}
//------------------------------------------------------------------------------
/**
 * Evaluates the chosen variable at every subcell center of every original
 * cell. Parallelized over original cells when OpenMP is enabled.
 *
 * IMPROVEMENT: the input DOF offset depends only on the cell, not on the
 * subcell, so it is hoisted out of the inner loop instead of being
 * recomputed kSubCellsPerCell times per cell.
 */
template<typename T>
void VariableSubsampler<T>::get(const double* inData, const unsigned int* cellMap,
    int variable, double* outData) const
{
    // Iterate over original cells.
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
    for (unsigned int c = 0; c < m_numCells; ++c) {
        // DOFs of this cell's variable; identical for all its subcells.
        const double* cellDofs = &inData[getInVarOffset(c, variable, cellMap)];
        for (unsigned int sc = 0; sc < kSubCellsPerCell; ++sc) {
            outData[getOutVarOffset(c, sc)] =
                m_BasisFunctions[sc].evalWithCoeffs(cellDofs);
        }
    }
}
//------------------------------------------------------------------------------
} // namespace
}
#endif // VARIABLE_SUBSAMPLER_H_
|
GB_unaryop__ainv_uint16_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_int8
// op(A') function: GB_tran__ainv_uint16_int8
// C type: uint16_t
// A type: int8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = -aij entrywise, casting int8_t -> uint16_t, over anz entries.
// The loop body is fully defined by the GB_* macros earlier in this file;
// this is auto-generated code (see header note: do not edit).
GrB_Info GB_unop__ainv_uint16_int8
(
uint16_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast, and apply the unary operator.
// The entire traversal is supplied by the included template, parameterized
// by the GB_* macros defined earlier in this file.
GrB_Info GB_tran__ainv_uint16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
HelloWorldOMP.c | /*
OpenMP example program Hello World.
The master thread forks a parallel region.
All threads in the team obtain their thread number and print it.
Only the master thread prints the total number of threads.
Compile with: gcc -O3 -fopenmp omp_hello.c -o omp_hello
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Classic OpenMP hello-world: every thread in the team reports its id,
 * and thread 0 additionally reports the team size.
 */
int main (int argc, char *argv[]) {
  int team_size, thread_id;

  /* Fork a team; each thread gets private copies of both variables. */
#pragma omp parallel private(team_size, thread_id)
  {
    /* Identify this thread and greet. */
    thread_id = omp_get_thread_num();
    printf("Hello World from thread = %d\n", thread_id);

    /* The master thread (id 0) is the only one to report the team size. */
    if (thread_id == 0) {
      team_size = omp_get_num_threads();
      printf("Number of threads = %d\n", team_size);
    }
  } /* Implicit barrier: the team joins and disbands here. */

  exit(0);
}
|
GB_unaryop__lnot_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_uint64
// op(A') function: GB_tran__lnot_int32_uint64
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = !(aij != 0) entrywise, casting uint64_t -> int32_t, over anz
// entries. The loop body comes from the GB_* macros earlier in this file;
// auto-generated code (do not edit).
GrB_Info GB_unop__lnot_int32_uint64
(
int32_t *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, cast, and apply the logical-not operator.
// The traversal is supplied entirely by the included template file,
// parameterized by the GB_* macros defined earlier in this file.
GrB_Info GB_tran__lnot_int32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
csyrk.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsyrk.c, normal z -> c, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_syrk
*
* Performs one of the symmetric rank k operations
*
* \f[ C = \alpha A \times A^T + \beta C, \f]
* or
* \f[ C = \alpha A^T \times A + \beta C, \f]
*
* where alpha and beta are scalars, C is an n-by-n symmetric
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
* - PlasmaTrans: \f[ C = \alpha A^T \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaTrans, lda >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_csyrk
* @sa plasma_csyrk
* @sa plasma_dsyrk
* @sa plasma_ssyrk
*
******************************************************************************/
int plasma_csyrk(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
plasma_complex32_t beta, plasma_complex32_t *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans)) {
plasma_error("illegal value of trans");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (k < 0) {
plasma_error("illegal value of k");
return -4;
}
// A is n-by-k when not transposed, k-by-n otherwise.
int am, an;
if (trans == PlasmaNoTrans) {
am = n;
an = k;
}
else {
am = k;
an = n;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldc < imax(1, n)) {
plasma_error("illegal value of ldc");
return -10;
}
// quick return
if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_syrk(plasma, PlasmaComplexFloat, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
n, n, 0, 0, n, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
// NOTE(review): retval from the two init calls below is never checked —
// confirm plasma_sequence_init/plasma_request_init cannot fail here.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
// A single master thread submits the tasks; the team executes them.
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
plasma_omp_cge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_csyrk(uplo, trans,
alpha, A,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_syrk
*
* Performs rank k update.
* Non-blocking tile version of plasma_csyrk().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
* - PlasmaTrans: \f[ C = \alpha A^T \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_csyrk
* @sa plasma_omp_csyrk
* @sa plasma_omp_csyrk
* @sa plasma_omp_dsyrk
* @sa plasma_omp_ssyrk
*
******************************************************************************/
/*
 * Asynchronous tile variant of plasma_csyrk(): validates its arguments,
 * then submits the parallel rank-k update. Errors are reported through
 * sequence->status / request->status rather than a return value.
 */
void plasma_omp_csyrk(plasma_enum_t uplo, plasma_enum_t trans,
                      plasma_complex32_t alpha, plasma_desc_t A,
                      plasma_complex32_t beta, plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        // BUG FIX: fail with the matching error code. The synchronous
        // plasma_csyrk() returns PlasmaErrorNotInitialized for this case;
        // this path previously reported PlasmaErrorIllegalValue.
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        // NOTE(review): plasma_request_fail() receives the NULL sequence
        // here — confirm it tolerates a NULL first argument.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return: k is the inner dimension of the update.
    int k = trans == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;
    // Call the parallel function.
    plasma_pcsyrk(uplo, trans,
                  alpha, A,
                  beta, C,
                  sequence, request);
}
|
pi_omp_teams.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1.
History: Written by Tim Mattson, 11/99.
Configure eclipse based: https://medium.com/swlh/openmp-on-ubuntu-1145355eeb2
*/
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include <stdint.h>
#include <stdlib.h>
// Declare variables
uint32_t num_steps_teams = 100000000;
uint32_t teams_number = 2;
uint32_t max_num_threads = 2;
double step;
// Constant for help on usage
static char usage[] = "usage: %s [-h] [-a] -t teams_number -n thread_number\n"
"-t Maximum number of teams.\n"
"-n Maximum number of threads.\n"
"-a displays the information of the authors of the program.\n"
"-h displays the usage message to let the user know how to execute the application.\n";
// Internal functions declarations
double pi_opm_teams(uint32_t num_steps, uint32_t teams_number, uint32_t max_num_threads);
void check_required_inputs(int i_flag, int o_flag, char *prog_name);
// Main code
/*
 * Parses -t (teams) and -n (threads), validates that both were given,
 * and runs the teams-based pi computation.
 */
int main(int argc, char **argv) {
    // Flags recording whether each required option was seen.
    int t_flag = 0;
    int n_flag = 0;
    // Parsed option values (shadow the file-scope defaults on purpose;
    // they are only read after check_required_inputs() has passed).
    uint32_t max_num_threads = 0;
    uint32_t teams_number = 0;
    int c;

    while ((c = getopt(argc, argv, "hat:n:")) != -1)
        switch (c) {
        case 't':
            t_flag = 1;
            teams_number = atoi(optarg);
            break;
        case 'n':
            n_flag = 1;
            max_num_threads = atoi(optarg);
            break;
        case 'h':
            fprintf(stderr, usage, argv[0]);
            exit(1);
            break;
        case 'a':
            printf("Authors: agomez and rcespedes\n");
            exit(1);
            break;
        case ':':
            break;
        case '?':
            fprintf (stderr, "Unknown option `-%c'.\n", optopt);
            return 1;
        default:
            abort();
        }
    // Check required arguments were provided (exits on failure).
    check_required_inputs(t_flag, n_flag, argv[0]);
    printf("Configured to use -Teams number: %d | -Max thread number: %d\n",teams_number, max_num_threads);
    // BUG FIX: forward the parsed -t/-n values. The original called
    // pi_opm_teams(num_steps_teams, 8, 8), silently ignoring both options
    // while the printf above claimed they were in effect.
    pi_opm_teams(num_steps_teams, teams_number, max_num_threads);
    return 0;
}
// Function to check user provided the required arguments
/* Verify that both mandatory options (-t and -n) were supplied; print an
 * error plus the usage text and terminate the program otherwise. */
void check_required_inputs(int t_flag, int n_flag, char *prog_name){
    // Fast path: everything required is present.
    if (t_flag && n_flag)
        return;
    // Report the first missing option, mirroring the option order.
    if (!t_flag)
        fprintf(stderr, "%s: missing -t option\n", prog_name);
    else
        fprintf(stderr, "%s: missing -n option\n", prog_name);
    fprintf(stderr, usage, prog_name);
    exit(1);
}
/*
 * Numerically integrates 4/(1+x*x) over [0,1] with the midpoint rule,
 * distributing the num_steps iterations across OpenMP teams and threads.
 * Prints the team/thread counts, the result and the elapsed time;
 * returns the computed approximation of pi.
 */
double pi_opm_teams(uint32_t num_steps, uint32_t teams_number, uint32_t max_num_threads){
    uint32_t i, used_teams, used_threads = 0;
    double x, pi, sum = 0.0;
    double start_time, run_time;
    // Width of each integration slice (file-scope `step`).
    step = 1.0 / (double)num_steps;
    start_time = omp_get_wtime();
    // BUG FIX: the original `#pragma omp teams` had no structured block, so
    // the teams region ended after the first assignment and the following
    // `distribute` construct was orphaned outside any teams region (invalid
    // OpenMP). Enclose the inquiry calls and the loop in one teams region,
    // carrying the sum reduction on both constructs.
    #pragma omp teams num_teams(teams_number) thread_limit(max_num_threads) reduction(+:sum) private(x)
    {
        used_teams = omp_get_num_teams();
        used_threads = omp_get_num_threads();
        // Spread the iterations over the teams, then over each team's threads.
        #pragma omp distribute parallel for reduction(+:sum) private(x)
        for (i = 1; i <= num_steps; i++) {
            x = (i - 0.5) * step;
            sum = sum + 4.0 / (1.0 + x * x);
        }
    }
    pi = step * sum;
    run_time = omp_get_wtime() - start_time;
    printf("Current settings -Teams used number: %d | -Threads used: %d \n",used_teams, used_threads);
    printf("pi teams implementation with %d steps is %lf in %.12lf seconds\n", num_steps, pi, run_time);
    return pi;
}
|
singleModificado2.c | /*
$ gcc -fopenmp -O2 src/single.c -o bin/single
$ ./bin/single
Introduce valor de inicialización a: 1
Single ejecutada por el thread 0
Depués de la región parallel:
b[0] = 1 b[1] = 1 b[2] = 1 b[3] = 1 b[4] = 1 b[5] = 1 b[6] = 1 b[7] = 1 b[8] = 1
*/
#include <stdio.h>
#include <omp.h>
/*
 * Reads an initialization value on the master thread, shares it with the
 * team through a barrier, then fills and prints array b in a worksharing
 * loop.
 *
 * BUG FIXES: implicit-int `main()` is invalid since C99 -> `int main(void)`;
 * the scanf result was unchecked, leaving `a` indeterminate on bad input;
 * main now returns an explicit status.
 */
int main(void) {
    int n = 9, i, a, b[n];
    // Mark every slot as not-yet-initialized.
    for (i=0; i<n; i++)
        b[i] = -1;
#pragma omp parallel
    {
#pragma omp master
        {
            printf("Introduce valor de inicialización a: ");
            // Fall back to 0 if the read fails, so `a` is never indeterminate.
            if (scanf("%d", &a ) != 1)
                a = 0;
            printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
        }
        // All threads wait until the master has read `a`.
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++){
            b[i] = a;
            printf("b[%d] = %d\t", i, a);
        }
    }
    printf("\n");
    return 0;
}
GB_binop__pow_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_int16
// A.*B function (eWiseMult): GB_AemultB__pow_int16
// A*D function (colscale): (none)
// D*A function (rowscale): (node)
// C+=B function (dense accum): GB_Cdense_accumB__pow_int16
// C+=b function (dense accum): GB_Cdense_accumb__pow_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int16
// C=scalar+B GB_bind1st__pow_int16
// C=scalar+B' GB_bind1st_tran__pow_int16
// C=A+scalar GB_bind2nd__pow_int16
// C=A'+scalar GB_bind2nd_tran__pow_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_pow_int16 (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_pow_int16 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT16 || GxB_NO_POW_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = GB_pow_int16(aij,bij).
// The loop is supplied by the included template, parameterized by the GB_*
// macros earlier in this auto-generated file.
GrB_Info GB_Cdense_ewise3_noaccum__pow_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, with the work
// pre-sliced into ntasks chunks described by the *_slice arrays.
// The loop is supplied by the included template file.
GrB_Info GB_Cdense_accumB__pow_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a
// dense matrix. The loop is supplied by the included template file.
GrB_Info GB_Cdense_accumb__pow_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first.
// Harmless generator artifact in this auto-generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// This kernel is not generated for the POW operator: the whole definition is
// compiled out with #if 0 and its name is the placeholder "(none)".  Kept only
// so the generated file has a uniform shape across operators.
#if 0
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// This kernel is not generated for the POW operator: the definition is
// compiled out with #if 0.  Placeholder name corrected from "(node)" to
// "(none)" for consistency with the disabled colscale kernel above.
#if 0
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Release the ek-slice workspace arrays for M, A, and B.
// NOTE(review): assumes GB_ek_slice_free tolerates NULL pointers, since the
// arrays below start out NULL — confirm against its definition.
#undef GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

// eWiseAdd (set-union semantics) with the POW operator on int16_t, with an
// optional mask M (structural and/or complemented per Mask_struct/Mask_comp).
// TaskList describes the parallel schedule computed by the caller; the full
// algorithm is in the included template.
GrB_Info GB_AaddB__pow_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slices; start NULL and are freed via GB_FREE_ALL on exit
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// eWiseMult (set-intersection semantics) with the POW operator on int16_t.
// Mirrors GB_AaddB above (same mask/task parameters) but without Ch_is_Mh.
// Relies on the GB_FREE_ALL macro defined just before GB_AaddB.
GrB_Info GB_AemultB__pow_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slices; start NULL and are freed via GB_FREE_ALL on exit
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_pow_int16 (x, Bx [p]) for every entry p present in the
// bitmap Bb (or for all p when Bb selects everything, per the GBB macro).
// Cx and Bx may be aliased; each output slot is written from the same slot of
// the input, so aliasing is safe.
GrB_Info GB_bind1st__pow_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the scalar and the input/output arrays
    int16_t *Cvals = (int16_t *) Cx_output ;
    const int16_t *Bvals = (int16_t *) Bx_input ;
    const int16_t xval = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            Cvals [p] = GB_pow_int16 (xval, Bvals [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_pow_int16 (Ax [p], y) for every entry p present in the
// bitmap Ab (per the GBB macro).  Mirror image of GB_bind1st above, with the
// scalar bound to the second operand.  Cx and Ax may be aliased.
GrB_Info GB_bind2nd__pow_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the scalar and the input/output arrays
    int16_t *Cvals = (int16_t *) Cx_output ;
    const int16_t *Avals = (int16_t *) Ax_input ;
    const int16_t yval = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            Cvals [p] = GB_pow_int16 (Avals [p], yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name).
// GB_unop_transpose.c invokes GB_CAST_OP to move each entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = Ax [pA] ;                     \
    Cx [pC] = GB_pow_int16 (x, aij) ;           \
}

// Transposes A while applying z = GB_pow_int16 (x, aij) to each entry, with
// the scalar bound first.  Workspaces/A_slice describe the parallel transpose
// plan; the loop is in the included GB_unop_transpose.c template.
GrB_Info GB_bind1st_tran__pow_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE after the kernel (generated-code pattern; the
    // definition is the same here)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name).
// GB_unop_transpose.c invokes GB_CAST_OP to move each entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = Ax [pA] ;                     \
    Cx [pC] = GB_pow_int16 (aij, y) ;           \
}

// Transposes A while applying z = GB_pow_int16 (aij, y) to each entry, with
// the scalar bound second.  Mirror image of GB_bind1st_tran above.
GrB_Info GB_bind2nd_tran__pow_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
/** @file kmeans.c
** @brief K-means - Declaration
** @author Andrea Vedaldi, David Novotny
**/
/*
Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
Copyright (C) 2013 Andrea Vedaldi and David Novotny.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans K-means clustering
@author Andrea Vedaldi
@author David Novotny
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref kmeans.h implements a number of algorithm for **K-means
quantization**: Lloyd @cite{lloyd82least}, an accelerated version by
Elkan @cite{elkan03using}, and a large scale algorithm based on
Approximate Nearest Neighbors (ANN). All algorithms support @c float
or @c double data and can use the $l^1$ or the $l^2$ distance for
clustering. Furthermore, all algorithms can take advantage of multiple
CPU cores.
Please see @subpage kmeans-fundamentals for a technical description of
K-means and of the algorithms implemented here.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The goal of K-means is to partition a dataset into $K$
“compact” clusters. The following example demonstrates
using @ref kmeans.h in the C programming language to partition @c
numData @c float vectors into @c numCenters clusters using
Lloyd's algorithm:
@code
#include <vl/kmeans.h>
double energy ;
double * centers ;
// Use float data and the L2 distance for clustering
VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
// Use Lloyd algorithm
vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
// Initialize the cluster centers by randomly sampling the data
vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ;
// Run at most 100 iterations of cluster refinement using Lloyd algorithm
vl_kmeans_set_max_num_iterations (kmeans, 100) ;
vl_kmeans_refine_centers (kmeans, data, numData) ;
// Obtain the energy of the solution
energy = vl_kmeans_get_energy(kmeans) ;
// Obtain the cluster centers
centers = vl_kmeans_get_centers(kmeans) ;
@endcode
Once the centers have been obtained, new data points can be assigned
to clusters by using the ::vl_kmeans_quantize function:
@code
vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData) ;
float * distances = vl_malloc(sizeof(float) * numData) ;
vl_kmeans_quantize(kmeans, assignments, distances, data, numData) ;
@endcode
Alternatively, one can directly assign new pointers to the closest
centers, without bothering with a ::VlKMeans object.
There are several considerations that may impact the performance of
KMeans. First, since K-means is usually based on a local optimization
algorithm, the **initialization method** is important. The following
initialization methods are supported:
Method | Function | Description
---------------|-----------------------------------------|-----------------------------------------------
Random samples | ::vl_kmeans_init_centers_with_rand_data | Random data points
K-means++ | ::vl_kmeans_init_centers_plus_plus | Random selection biased towards diversity
Custom | ::vl_kmeans_set_centers | Choose centers (useful to run quantization only)
See @ref kmeans-init for further details. The initialization methods
use a randomized selection of the data points; the random number
generator init is controlled by ::vl_rand_init.
The second important choice is the **optimization algorithm**. The
following optimization algorithms are supported:
Algorithm | Symbol | See | Description
------------|------------------|-------------------|-----------------------------------------------
Lloyd | ::VlKMeansLloyd | @ref kmeans-lloyd | Alternate EM-style optimization
Elkan | ::VlKMeansElkan | @ref kmeans-elkan | A speedup using triangular inequalities
ANN | ::VlKMeansANN | @ref kmeans-ann | A speedup using approximated nearest neighbors
See the relative sections for further details. These algorithm are
iterative, and stop when either a **maximum number of iterations**
(::vl_kmeans_set_max_num_iterations) is reached, or when the energy
changes sufficiently slowly in one iteration (::vl_kmeans_set_min_energy_variation).
All the three algorithms support multithreaded computations. The number
of threads used is usually controlled globally by ::vl_set_num_threads.
**/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page kmeans-fundamentals K-means fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means
is to find $K$ `centers` $\bc_1,\dots,\bc_K \in \real^d$ and
`assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the
centers such that the sum of distances
\[
E(\bc_1,\dots,\bc_k,q_1,\dots,q_n)
= \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p
\]
is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm),
because in this case the optimal centers are the means of the input
vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm)
will also be considered.
Up to normalization, the K-means objective $E$ is also the average
reconstruction error if the original points are approximated with the
cluster centers. Thus K-means is used not only to group the input
points into cluster, but also to `quantize` their values.
K-means is widely used in computer vision, for example in the
construction of vocabularies of visual features (visual words). In
these applications the number $n$ of points to cluster and/or the
number $K$ of clusters is often large. Unfortunately, minimizing the
objective $E$ is in general a difficult combinatorial problem, so
locally optimal or approximated solutions are sought instead.
The basic K-means algorithm alternate between re-estimating the
centers and the assignments (@ref kmeans-lloyd). Combined with a good
initialization strategy (@ref kmeans-init) and, potentially, by
re-running the optimization from a number of randomized starting
states, this algorithm may attain satisfactory solutions in practice.
However, despite its simplicity, Lloyd's algorithm is often too slow.
A good replacement is Elkan's algorithm (@ref kmeans-elkan), which
uses the triangular inequality to cut down significantly the cost of
Lloyd's algorithm. Since this algorithm is otherwise equivalent, it
should often be preferred.
For very large problems (millions of point to clusters and hundreds,
thousands, or more clusters to find), even Elkan's algorithm is not
sufficiently fast. In these cases, one can resort to a variant of
Lloyd's algorithm that uses an approximated nearest neighbors routine
(@ref kmeans-ann).
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-init Initialization methods
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
All the $K$-means algorithms considered here find locally optimal
solutions; as such the way they are initialized is important. @ref
kmeans.h supports the following initialization algorithms:
@par Random data samples
The simplest initialization method is to sample $K$ points at random
from the input data and use them as initial values for the cluster
centers.
@par K-means++
@cite{arthur07k-means} proposes a randomized initialization of the
centers which improves upon random selection. The first center $\bc_1$
is selected at random from the data points $\bx_1, \dots, \bx_n $ and
the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is
computed. Then the second center $\bc_2$ is selected at random from
the data points with probability proportional to the distance. The
procedure is repeated to obtain the other centers by using the minimum
distance to the centers collected so far.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-lloyd Lloyd's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The most common K-means method is Lloyd's algorithm
@cite{lloyd82least}. This algorithm is based on the observation that,
while jointly optimizing clusters and assignment is difficult,
optimizing one given the other is easy. Lloyd's algorithm alternates
the steps:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_j}$ closer to it. This requires finding for each point the
closest among $K$ other points, which is potentially slow.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
A naive implementation of the assignment step requires $O(dnK)$
operations, where $d$ is the dimensionality of the data, $n$ the
number of data points, and $K$ the number of centers. Updating the
centers is much cheaper: $O(dn)$ operations suffice to compute the $K$
means and a slightly higher cost is required for the medians. Clearly,
the bottleneck is the assignment computation, and this is what the
other K-means algorithm try to improve.
During the iterations, it can happen that a cluster becomes empty. In
this case, K-means automatically **“restarts” the
cluster** center by selecting a training point at random.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-elkan Elkan's algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
Elkan's algorithm @cite{elkan03using} is a variation of Lloyd
alternate optimization algorithm (@ref kmeans-lloyd) that uses the
triangular inequality to avoid many distance calculations when
assigning points to clusters. While much faster than Lloyd, Elkan's
method uses storage proportional to the number of clusters by data
points, which makes it unpractical for a very large number of
clusters.
The idea of this algorithm is that, if a center update does not move
them much, then most of the point-to-center computations can be
avoided when the point-to-center assignments are recomputed. To detect
which distances need evaluation, the triangular inequality is used to
lower and upper bound distances after a center update.
Elkan algorithms uses two key observations. First, one has
\[
\|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2
\quad\Rightarrow\quad
\|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p.
\]
Thus if the distance between $\bx_i$ and its current center
$\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$
to another center $\bc$, then $\bc$ can be skipped when the new
assignment for $\bx_i$ is searched. Checking this requires keeping
track of all the inter-center distances, but centers are typically a
small fraction of the training data, so overall this can be a
significant saving. In particular, if this condition is satisfied for
all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be
skipped completely. Furthermore, the condition can be tested also
based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$.
Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new
distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by
\[
\|\bx - \bc\|_p - \|\bc - \hat\bc\|_p
\leq
\|\bx - \hat{\bc}\|_p
\leq
\|\bx - \bc\|_p + \|\bc - \hat{\bc}\|_p.
\]
This allows to maintain an upper bound on the distance of $\bx_i$ to
its current center $\bc_{q_i}$ and a lower bound to any other center
$\bc$:
@f{align*}
UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\
LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p.
@f}
Thus the K-means algorithm becomes:
1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i -\hat \bc\|_p$ for
all points and centers. Find the current assignments $q_i$ and
bounds $UB_i$ by finding the closest centers to each point: $UB_i =
\min_{\bc} LB_i(\bc)$.
2. **Center estimation.**
1. Recompute all the centers based on the new means; call the updated
version $\hat{\bc}$.
2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$
as explained above.
3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the next
iteration.
3. **Quantization.**
1. Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2} \|\bc_{q_i} - \bc\|_p$
for all centers $\bc \not= \bc_{q_i}$.
2. For each remaining point $\bx_i$ and center $\bc \not= \bc_{q_i}$:
1. Skip $\bc$ if
\[
UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \|
\quad\text{or}\quad
UB_i \leq LB_i(\bc).
\]
The first condition reflects the first observation above; the
second uses the bounds to decide if $\bc$ can be closer than the
current center $\bc_{q_i}$ to the point $\bx_i$. If the center
cannot be skipped, continue as follows.
3. Skip $\bc$ if the condition above is satisfied after making the
upper bound tight:
\[
UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p.
\]
Note that the latter calculation can be done only once for $\bx_i$.
If the center cannot be skipped still, continue as follows.
4. Tighten the lower bound too:
\[
LB_i(\bc) = \| \bx_i - \bc \|_p.
\]
At this point both $UB_i$ and $LB_i(\bc)$ are tight. If $LB_i <
UB_i$, then the point $\bx_i$ should be reassigned to
$\bc$. Update $q_i$ to the index of center $\bc$ and reset $UB_i
= LB_i(\bc)$.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section kmeans-ann ANN algorithm
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The *Approximate Nearest Neighbor* (ANN) K-means algorithm
@cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a
variant of Lloyd's algorithm (@ref kmeans-lloyd) that uses a best-bin-first
randomized KD-tree algorithm to approximately (and quickly) find the
closest cluster center to each point. The KD-tree implementation is
based on @ref kdtree.
The algorithm can be summarized as follows:
1. **Quantization.** Each point $\bx_i$ is reassigned to the center
$\bc_{q_j}$ closer to it. This starts by indexing the $K$ centers
by a KD-tree and then using the latter to quickly find the closest
center for every training point. The search is approximated to
further improve speed. This opens up the possibility that a data
point may receive an assignment that is *worse* than the current
one. This is avoided by checking that the new assignment estimated
by using ANN is an improvement; otherwise the old assignment is
kept.
2. **Center estimation.** Each center $\bc_q$ is updated to minimize
its average distances to the points assigned to it. It is easy to
show that the best center is the mean or median of the points,
respectively if the $l^2$ or $l^1$ norm is considered.
The key is to trade-off carefully the speedup obtained by using the
ANN algorithm and the loss in accuracy when retrieving neighbors. Due
to the curse of dimensionality, KD-trees become less effective for
higher dimensional data, so that the search cost, which in the best
case is logarithmic with this data structure, may become effectively
linear. This is somewhat mitigated by the fact that a new KD-tree
is computed at each iteration, reducing the likelihood that points may
get stuck with sub-optimal assignments.
Experiments with the quantization of 128-dimensional SIFT features
show that the ANN algorithm may use one quarter of the comparisons of
Elkan's while retaining a similar solution accuracy.
*/
#include "kmeans.h"
#include "generic.h"
#include "mathop.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
 ** @brief Reset state
 **
 ** Deletes any stored centers and cached inter-center distances,
 ** releasing the corresponding memory, and zeroes the model geometry
 ** (dimension and number of centers).  This cancels the effect of
 ** seeding or setting the centers, but leaves all other configuration
 ** parameters (algorithm, iteration limits, ...) untouched.
 **/

VL_EXPORT void
vl_kmeans_reset (VlKMeans * self)
{
  self->numCenters = 0 ;
  self->dimension = 0 ;

  if (self->centers) {
    vl_free(self->centers) ;
    self->centers = NULL ;
  }
  if (self->centerDistances) {
    vl_free(self->centerDistances) ;
    self->centerDistances = NULL ;
  }
}
/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE).
 ** @param distance distance used for clustering.
 ** @return new KMeans object instance.
 **
 ** The object starts with no centers; defaults are Lloyd's algorithm,
 ** at most 100 iterations, one repetition, and ANN parameters of
 ** 3 trees / 100 comparisons.
 **/

VL_EXPORT VlKMeans *
vl_kmeans_new (vl_type dataType,
               VlVectorComparisonType distance)
{
  VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;

  /* data representation */
  self->dataType = dataType ;
  self->distance = distance ;

  /* optimization defaults */
  self->algorithm = VlKMeansLloyd ;
  self->verbosity = 0 ;
  self->maxNumIterations = 100 ;
  self->minEnergyVariation = 1e-4 ;
  self->numRepetitions = 1 ;

  /* ANN defaults */
  self->numTrees = 3;
  self->maxNumComparisons = 100;

  /* no centers yet */
  self->centers = NULL ;
  self->centerDistances = NULL ;

  vl_kmeans_reset (self) ;
  return self ;
}
/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object by copy
 ** @param kmeans KMeans object to copy.
 ** @return new copy.
 **
 ** All configuration parameters are copied; the centers and the
 ** cached inter-center distances (if present) are deep-copied.
 **/

VL_EXPORT VlKMeans *
vl_kmeans_new_copy (VlKMeans const * kmeans)
{
  VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ;
  self->algorithm = kmeans->algorithm ;
  self->distance = kmeans->distance ;
  self->dataType = kmeans->dataType ;

  self->verbosity = kmeans->verbosity ;
  self->maxNumIterations = kmeans->maxNumIterations ;
  /* bug fix: minEnergyVariation was previously not copied, leaving the
     field uninitialized (self comes from vl_malloc, not vl_calloc) */
  self->minEnergyVariation = kmeans->minEnergyVariation ;
  self->numRepetitions = kmeans->numRepetitions ;

  self->dimension = kmeans->dimension ;
  self->numCenters = kmeans->numCenters ;
  self->centers = NULL ;
  self->centerDistances = NULL ;

  self->numTrees = kmeans->numTrees;
  self->maxNumComparisons = kmeans->maxNumComparisons;

  /* deep-copy the centers, if any */
  if (kmeans->centers) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ;
    self->centers = vl_malloc(dataSize) ;
    memcpy (self->centers, kmeans->centers, dataSize) ;
  }

  /* deep-copy the cached inter-center distances, if any */
  if (kmeans->centerDistances) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ;
    self->centerDistances = vl_malloc(dataSize) ;
    memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ;
  }
  return self ;
}
/** ------------------------------------------------------------------
 ** @brief Deletes a KMeans object
 ** @param self KMeans object instance.
 **
 ** Releases every buffer owned by the object (via ::vl_kmeans_reset)
 ** and then the object itself, undoing ::vl_kmeans_new.
 **/

VL_EXPORT void
vl_kmeans_delete (VlKMeans * self)
{
  /* free owned buffers first, then the struct */
  vl_kmeans_reset (self) ;
  vl_free (self) ;
}
/* Helper bundling the state shared by the qsort callbacks below: the
 * permutation being sorted, the data array it indexes, and the stride
 * (in elements) applied to each permuted index when reading a value. */
typedef struct _VlKMeansSortWrapper {
  vl_uint32 * permutation ;
  void const * data ;
  vl_size stride ;
} VlKMeansSortWrapper ;
/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */
#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"
/* #ifdef VL_KMEANS_INSTANTITATING */
#endif
/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING
/* ---------------------------------------------------------------- */
/*                           Set centers                            */
/* ---------------------------------------------------------------- */

/* Install user-provided centers: records the geometry and stores a
 * private copy of the numCenters x dimension center matrix. */
static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self,
 TYPE const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  vl_size byteSize = sizeof(TYPE) * dimension * numCenters ;
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (byteSize) ;
  memcpy ((TYPE*)self->centers, centers, byteSize) ;
}
/* ---------------------------------------------------------------- */
/*                          Random seeding                          */
/* ---------------------------------------------------------------- */

/* Seed the centers by picking numCenters data points at random,
 * skipping exact duplicates when enough candidates remain.  Sets
 * self->dimension, self->numCenters and allocates self->centers. */
static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex i, j, k ;
  VlRand * rand = vl_get_rand () ;

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  {
    vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
    VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
    VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
    TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;

    /* get a random permutation of the data point */
    for (i = 0 ; i < numData ; ++i) perm[i] = i ;
    _vl_kmeans_shuffle (perm, numData, rand) ;

    /* i walks the permutation, k counts the centers accepted so far */
    for (k = 0, i = 0 ; k < numCenters ; ++ i) {

      /* compare the next data point to all centers collected so far
         to detect duplicates (if there are enough left)
       */
      if (numCenters - k < numData - i) {
        vl_bool duplicateDetected = VL_FALSE ;
        /* distances[0..k-1] <- distance of candidate to each accepted
           center */
        VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances,
                                                              dimension,
                                                              data + dimension * perm[i], 1,
                                                              (TYPE*)self->centers, k,
                                                              distFn) ;
        for (j = 0 ; j < k ; ++j) {
          duplicateDetected |= (distances[j] == 0) ;
        }
        /* skip the candidate; note k is not incremented, so another
           point from the permutation will be tried */
        if (duplicateDetected) continue ;
      }

      /* ok, it is not a duplicate so we can accept it! */
      memcpy ((TYPE*)self->centers + dimension * k,
              data + dimension * perm[i],
              sizeof(TYPE) * dimension) ;
      k ++ ;
    }
    vl_free(distances) ;
    vl_free(perm) ;
  }
}
/* ---------------------------------------------------------------- */
/*                         kmeans++ seeding                         */
/* ---------------------------------------------------------------- */

/* Seed the centers with the k-means++ policy: the first center is a
 * uniformly random data point; each subsequent one is sampled with
 * probability proportional to its distance from the closest center
 * selected so far. */
static void
VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex x, c ;
  VlRand * rand = vl_get_rand () ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  /* no center picked yet: every point is infinitely far from the set */
  for (x = 0 ; x < numData ; ++x) {
    minDistances[x] = (TYPE) VL_INFINITY_D ;
  }

  /* select the first point at random */
  x = vl_rand_uindex (rand, numData) ;
  c = 0 ;
  while (1) {
    TYPE energy = 0 ;
    TYPE acc = 0 ;
    TYPE thresh = (TYPE) vl_rand_real1 (rand) ;

    /* commit point x as center number c */
    memcpy ((TYPE*)self->centers + c * dimension,
            data + x * dimension,
            sizeof(TYPE) * dimension) ;

    c ++ ;
    if (c == numCenters) break ;

    /* distance of every point to the newly added center */
    VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)
    (distances,
     dimension,
     (TYPE*)self->centers + (c - 1) * dimension, 1,
     data, numData,
     distFn) ;

    /* refresh each point's distance to its closest center, summing
       them into the total energy */
    for (x = 0 ; x < numData ; ++x) {
      minDistances[x] = VL_MIN(minDistances[x], distances[x]) ;
      energy += minDistances[x] ;
    }

    /* inverse-CDF sampling: pick the first x whose cumulative mass
       reaches thresh * energy (the last point is the fallback) */
    for (x = 0 ; x < numData - 1 ; ++x) {
      acc += minDistances[x] ;
      if (acc >= thresh * energy) break ;
    }
  }

  vl_free(distances) ;
  vl_free(minDistances) ;
}
/* ---------------------------------------------------------------- */
/*                           Quantization                           */
/* ---------------------------------------------------------------- */

/* Assign each of the numData points to the exhaustively-nearest
 * center, writing assignments[i] and (optionally, if non-NULL) the
 * distance to that center into distances[i].  Parallelized over the
 * data points with OpenMP when available. */
static void
VL_XCAT(_vl_kmeans_quantize_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData)
{
  vl_index i ;

#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

#ifdef _OPENMP
#pragma omp parallel \
            shared(self, distances, assignments, numData, distFn, data) \
            num_threads(vl_get_max_threads())
#endif
  {
    /* vl_malloc cannot be used here if mapped to MATLAB malloc */
    /* per-thread scratch: distance of one point to every center */
    TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ;

#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0 ; i < (signed)numData ; ++i) {
      vl_uindex k ;
      TYPE bestDistance = (TYPE) VL_INFINITY_D ;
      VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters,
                                                            self->dimension,
                                                            data + self->dimension * i, 1,
                                                            (TYPE*)self->centers, self->numCenters,
                                                            distFn) ;
      /* argmin over the centers */
      for (k = 0 ; k < self->numCenters ; ++k) {
        if (distanceToCenters[k] < bestDistance) {
          bestDistance = distanceToCenters[k] ;
          assignments[i] = (vl_uint32)k ;
        }
      }
      if (distances) distances[i] = bestDistance ;
    }

    free(distanceToCenters) ;
  }
}
/* ---------------------------------------------------------------- */
/*                         ANN quantization                         */
/* ---------------------------------------------------------------- */

/* Assign points to centers using an approximate nearest-neighbor
 * search: the centers are indexed by a KD-forest which is queried for
 * each point.  When @a update is true and @a distances is non-NULL,
 * the ANN result only replaces the existing assignment if it is
 * strictly better (since the ANN answer may be worse than the current
 * assignment).  The forest is rebuilt and destroyed on every call. */
static void
VL_XCAT(_vl_kmeans_quantize_ann_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData,
 vl_bool update)
{
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

  /* index the current centers with a KD-forest */
  VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ;
  vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons);
  vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN);
  vl_kdforest_build(forest,self->numCenters,self->centers);

#ifdef _OPENMP
#pragma omp parallel default(none) \
            num_threads(vl_get_max_threads()) \
            shared(self, forest, update, assignments, distances, data, numData, distFn)
#endif
  {
    VlKDForestNeighbor neighbor ;
    VlKDForestSearcher * searcher ;
    vl_index x;

    /* searcher creation mutates the forest, hence the critical section */
#ifdef _OPENMP
#pragma omp critical
#endif
    searcher = vl_kdforest_new_searcher (forest) ;

#ifdef _OPENMP
#pragma omp for
#endif
    for(x = 0 ; x < (signed)numData ; ++x) {
      vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension));

      if (distances) {
        if(!update) {
          /* fresh assignment: take the ANN answer as is */
          distances[x] = (TYPE) neighbor.distance;
          assignments[x] = (vl_uint32) neighbor.index ;
        } else {
          /* keep the old assignment unless the ANN one is better */
          TYPE prevDist = (TYPE) distFn(self->dimension,
                                        data + self->dimension * x,
                                        (TYPE*)self->centers + self->dimension *assignments[x]);
          if (prevDist > (TYPE) neighbor.distance) {
            distances[x] = (TYPE) neighbor.distance ;
            assignments[x] = (vl_uint32) neighbor.index ;
          } else {
            distances[x] = prevDist ;
          }
        }
      } else {
        assignments[x] = (vl_uint32) neighbor.index ;
      }
    } /* end for */
  } /* end of parallel region */

  vl_kdforest_delete(forest);
}
/* ---------------------------------------------------------------- */
/* Helper functions */
/* ---------------------------------------------------------------- */
/* The sorting routine is used to find increasing permutation of each
* data dimension. This is used to quickly find the median for l1
* distance clustering. */
/* Compare the data values selected by two slots of the permutation
   table: positive if entry indexA is larger, negative if smaller. */
VL_INLINE TYPE
VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  TYPE const * values = (TYPE*) array->data ;
  TYPE valueA = values [array->permutation[indexA] * array->stride] ;
  TYPE valueB = values [array->permutation[indexB] * array->stride] ;
  return valueA - valueB ;
}
/* Exchange two entries of the permutation table. */
VL_INLINE void
VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
(VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB)
{
  vl_uint32 * perm = array->permutation ;
  vl_uint32 held = perm[indexA] ;
  perm[indexA] = perm[indexB] ;
  perm[indexB] = held ;
}
#define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort)
#define VL_QSORT_array VlKMeansSortWrapper*
#define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp)
#define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap)
#include "qsort-def.h"
/* For each data dimension, compute the permutation that sorts the data
   along that dimension in increasing order. Permutations are stored as
   one row of numData indices per dimension in the permutations buffer.
   Used to locate medians quickly for L1 clustering. */
static void
VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)
(VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData)
{
  vl_uindex dim, i ;
  for (dim = 0 ; dim < self->dimension ; ++dim) {
    VlKMeansSortWrapper array ;
    /* view the data strided so that only coordinate dim is compared */
    array.permutation = permutations + dim * numData ;
    array.data = data + dim ;
    array.stride = self->dimension ;
    /* start from the identity permutation */
    for (i = 0 ; i < numData ; ++i) {
      array.permutation[i] = (vl_uint32)i ;
    }
    VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&array, numData) ;
  }
}
/* ---------------------------------------------------------------- */
/* Lloyd refinement */
/* ---------------------------------------------------------------- */
/* Refine the centers with (at most) maxNumIterations of Lloyd's
   algorithm: alternately assign each point to its closest center and
   recompute each center from the points assigned to it (mean for L2,
   per-dimension median for L1). Empty clusters are restarted from a
   random data point. Returns the K-means energy at the last completed
   assignment. */
static double
VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double previousEnergy = VL_INFINITY_D ;
  double initialEnergy = VL_INFINITY_D ;
  double energy ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): sized by numData although only numCenters entries are
     indexed; safe provided numCenters <= numData -- confirm the training
     entry points enforce this */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;
  if (self->distance == VlDistanceL1) {
    /* medians minimize the L1 energy: precompute sorting permutations
       of the data along each dimension */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  for (energy = VL_INFINITY_D,
       iteration = 0;
       1 ;
       ++ iteration) {
    /* assign data to clusters */
    VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ;
    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration,
                energy) ;
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          /* message fixed: it previously said "ANN" although it is
             emitted by the Lloyd refinement */
          VL_PRINTF("kmeans: Lloyd terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }
    /* begin next iteration */
    previousEnergy = energy ;
    /* update cluster masses */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* each center becomes the mean of the points assigned to it */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart it from a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* each center becomes the per-dimension median of the points
           assigned to it, found by scanning the presorted permutation */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] =
                data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
        }
        /* Restart empty clusters from random data points. Fixed: this
           loop used to sit inside the dimension loop above while reusing
           d as its own index, which (a) clobbered the outer counter so
           dimensions > 0 were never median-updated whenever any cluster
           was empty, and (b) restarted and counted each empty center
           once per dimension. It now runs once, after all dimensions,
           matching the Elkan implementation. */
        for (c = 0 ; c < self->numCenters ; ++c) {
          if (clusterMasses[c] == 0) {
            TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */
    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration,
                numRestartedCenters) ;
    }
  } /* next Lloyd iteration */
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}
/* Recompute the table of pairwise center-to-center distances
   (allocated lazily on first use) and return the number of distinct
   center pairs evaluated, for the caller's distance-computation
   statistics. */
static double
VL_XCAT(_vl_kmeans_update_center_distances_, SFX)
(VlKMeans * self)
{
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  if (self->centerDistances == NULL) {
    /* numCenters x numCenters table, created once and reused */
    self->centerDistances =
      vl_malloc (sizeof(TYPE) * self->numCenters * self->numCenters) ;
  }
  VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances,
                                                        self->dimension,
                                                        self->centers, self->numCenters,
                                                        NULL, 0,
                                                        distFn) ;
  return self->numCenters * (self->numCenters - 1) / 2 ;
}
/* Refine the centers with (at most) maxNumIterations Lloyd-style
   updates in which the assignment step is replaced by an approximate
   nearest-neighbor (KD-forest) query. After the first iteration the
   quantizer is called with update = true, so an assignment is only
   replaced when the ANN search finds a strictly better center.
   Returns the energy at the last completed assignment. */
static double
VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size c, d, x, iteration ;
  double initialEnergy = VL_INFINITY_D ;
  double previousEnergy = VL_INFINITY_D ;
  double energy ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  VlRand * rand = vl_get_rand () ;
  vl_size totNumRestartedCenters = 0 ;
  vl_size numRestartedCenters = 0 ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): sized by numData although only numCenters entries are
     indexed; safe provided numCenters <= numData */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  if (self->distance == VlDistanceL1) {
    /* medians minimize the L1 energy: precompute sorting permutations
       of the data along each dimension */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  for (energy = VL_INFINITY_D,
       iteration = 0;
       1 ;
       ++ iteration) {
    /* assign data to clusters (update mode after the first pass) */
    VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ;
    /* compute energy */
    energy = 0 ;
    for (x = 0 ; x < numData ; ++x) energy += distances[x] ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration,
                energy) ;
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ;
      }
      break ;
    }
    if (energy == previousEnergy) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
    if (iteration == 0) {
      initialEnergy = energy ;
    } else {
      double eps = (previousEnergy - energy) / (initialEnergy - energy) ;
      if (eps < self->minEnergyVariation) {
        if (self->verbosity) {
          VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ;
        }
        break ;
      }
    }
    /* begin next iteration */
    previousEnergy = energy ;
    /* update cluster masses */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    numRestartedCenters = 0 ;
    switch (self->distance) {
      case VlDistanceL2:
        /* each center becomes the mean of the points assigned to it */
        memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < numData ; ++x) {
          TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* empty cluster: restart it from a random data point */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* each center becomes the per-dimension median of the points
           assigned to it, found by scanning the presorted permutation */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              ((TYPE*)self->centers) [d + c * self->dimension] =
                data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
        }
        /* Restart empty clusters from random data points. Fixed: this
           loop used to sit inside the dimension loop above while reusing
           d as its own index, which clobbered the outer counter (so
           dimensions > 0 were never median-updated whenever any cluster
           was empty) and restarted each empty center once per dimension.
           It now runs once, after all dimensions, matching the Elkan
           implementation. */
        for (c = 0 ; c < self->numCenters ; ++c) {
          if (clusterMasses[c] == 0) {
            TYPE * cpt = (TYPE*)self->centers + c * self->dimension ;
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      default:
        VL_PRINT("bad distance set: %d\n",self->distance);
        abort();
    } /* done compute centers */
    totNumRestartedCenters += numRestartedCenters ;
    if (self->verbosity && numRestartedCenters) {
      VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration,
                numRestartedCenters) ;
    }
  }
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  return energy ;
}
/* ---------------------------------------------------------------- */
/* Elkan refinement */
/* ---------------------------------------------------------------- */
/* Refine the centers with the Elkan algorithm. Elkan's method keeps,
   for every point, an upper bound on the distance to its assigned
   center and lower bounds on the distances to every other center;
   the triangle inequality is then used to skip most point-to-center
   distance computations while producing the same assignments as an
   exact Lloyd iteration. Returns the exact final energy.
   Fix in this revision: the "restarted centers" verbose message used
   to pass a stray energy argument, mismatching its format string
   (undefined behavior and a garbage restart count printed). */
static double
VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  vl_size d, iteration ;
  vl_index x ;
  vl_uint32 c, j ;
  vl_bool allDone ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  /* NOTE(review): sized by numData although only numCenters entries are
     indexed; safe provided numCenters <= numData */
  vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ;
  VlRand * rand = vl_get_rand () ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
  /* distance of each center to its closest other center */
  TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  /* upper bound on d(x, centers[assignments[x]]); "strict" means the
     bound is currently exact */
  TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ;
  vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ;
  /* lower bounds on d(x, c) for every point/center pair */
  TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ;
  TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ;
  TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ;
  vl_uint32 * permutations = NULL ;
  vl_size * numSeenSoFar = NULL ;
  double energy ;
  /* statistics: number of distance computations, per purpose */
  vl_size totDistanceComputationsToInit = 0 ;
  vl_size totDistanceComputationsToRefreshUB = 0 ;
  vl_size totDistanceComputationsToRefreshLB = 0 ;
  vl_size totDistanceComputationsToRefreshCenterDistances = 0 ;
  vl_size totDistanceComputationsToNewCenters = 0 ;
  vl_size totDistanceComputationsToFinalize = 0 ;
  vl_size totNumRestartedCenters = 0 ;
  if (self->distance == VlDistanceL1) {
    /* medians minimize the L1 energy: precompute sorting permutations
       of the data along each dimension */
    permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ;
    numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ;
    VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ;
  }
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /*                                                 Initialization */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /* An iteration is: get_new_centers + reassign + get_energy.
     This counts as iteration 0, where get_new_centers is assumed
     to be performed before calling the train function by
     the initialization function */
  /* update distances between centers */
  totDistanceComputationsToInit +=
    VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
  /* assign points to the initial centers and initialize bounds */
  memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters * numData) ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    TYPE distance ;
    /* do the first center */
    assignments[x] = 0 ;
    distance = distFn(self->dimension,
                      data + x * self->dimension,
                      (TYPE*)self->centers + 0) ;
    pointToClosestCenterUB[x] = distance ;
    pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
    pointToCenterLB[0 + x * self->numCenters] = distance ;
    totDistanceComputationsToInit += 1 ;
    /* do other centers */
    for (c = 1 ; c < self->numCenters ; ++c) {
      /* Can skip if the center assigned so far is twice as close
         as its distance to the center under consideration */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <=
          ((TYPE*)self->centerDistances)
          [c + assignments[x] * self->numCenters]) {
        continue ;
      }
      distance = distFn(self->dimension,
                        data + x * self->dimension,
                        (TYPE*)self->centers + c * self->dimension) ;
      pointToCenterLB[c + x * self->numCenters] = distance ;
      totDistanceComputationsToInit += 1 ;
      if (distance < pointToClosestCenterUB[x]) {
        pointToClosestCenterUB[x] = distance ;
        assignments[x] = c ;
      }
    }
  }
  /* compute UB on energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++x) {
    energy += pointToClosestCenterUB[x] ;
  }
  if (self->verbosity) {
    VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n",
              energy, totDistanceComputationsToInit) ;
  }
/* #define SANITY*/
#ifdef SANITY
  {
    int xx ;
    int cc ;
    TYPE tol = 1e-5 ;
    VL_PRINTF("inconsistencies after initial assignments:\n");
    for (xx = 0 ; xx < numData ; ++xx) {
      for (cc = 0 ; cc < self->numCenters ; ++cc) {
        TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
        TYPE b = distFn(self->dimension,
                        data + self->dimension * xx,
                        (TYPE*)self->centers + self->dimension * cc) ;
        if (cc == assignments[xx]) {
          TYPE z = pointToClosestCenterUB[xx] ;
          if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                 cc, xx, z, b) ;
        }
        if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n",
                               cc, xx, a, b) ;
      }
    }
  }
#endif
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /*                                                     Iterations */
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  for (iteration = 1 ; 1; ++iteration) {
    vl_size numDistanceComputationsToRefreshUB = 0 ;
    vl_size numDistanceComputationsToRefreshLB = 0 ;
    vl_size numDistanceComputationsToRefreshCenterDistances = 0 ;
    vl_size numDistanceComputationsToNewCenters = 0 ;
    vl_size numRestartedCenters = 0 ;
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*                                            Compute new centers */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    memset(clusterMasses, 0, sizeof(vl_size) * numData) ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      clusterMasses[assignments[x]] ++ ;
    }
    switch (self->distance) {
      case VlDistanceL2:
        /* each new center is the mean of the points assigned to it */
        memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ;
        for (x = 0 ; x < (signed)numData ; ++x) {
          TYPE * cpt = newCenters + assignments[x] * self->dimension ;
          TYPE const * xpt = data + x * self->dimension ;
          for (d = 0 ; d < self->dimension ; ++d) {
            cpt[d] += xpt[d] ;
          }
        }
        for (c = 0 ; c < self->numCenters ; ++c) {
          TYPE * cpt = newCenters + c * self->dimension ;
          if (clusterMasses[c] > 0) {
            TYPE mass = clusterMasses[c] ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] /= mass ;
            }
          } else {
            /* restart the center */
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      case VlDistanceL1:
        /* each new center is the per-dimension median of the points
           assigned to it */
        for (d = 0 ; d < self->dimension ; ++d) {
          vl_uint32 * perm = permutations + d * numData ;
          memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ;
          for (x = 0; x < (signed)numData ; ++x) {
            c = assignments[perm[x]] ;
            if (2 * numSeenSoFar[c] < clusterMasses[c]) {
              newCenters [d + c * self->dimension] =
                data [d + perm[x] * self->dimension] ;
            }
            numSeenSoFar[c] ++ ;
          }
        }
        /* restart the centers as required */
        for (c = 0 ; c < self->numCenters ; ++c) {
          if (clusterMasses[c] == 0) {
            TYPE * cpt = newCenters + c * self->dimension ;
            vl_uindex x = vl_rand_uindex(rand, numData) ;
            numRestartedCenters ++ ;
            for (d = 0 ; d < self->dimension ; ++d) {
              cpt[d] = data[x * self->dimension + d] ;
            }
          }
        }
        break ;
      default:
        abort();
    } /* done compute centers */
    /* compute the distance from the old centers to the new centers */
    for (c = 0 ; c < self->numCenters ; ++c) {
      TYPE distance = distFn(self->dimension,
                             newCenters + c * self->dimension,
                             (TYPE*)self->centers + c * self->dimension) ;
      centerToNewCenterDistances[c] = distance ;
      numDistanceComputationsToNewCenters += 1 ;
    }
    /* make the new centers current (swap buffers, no copy) */
    {
      TYPE * tmp = self->centers ;
      self->centers = newCenters ;
      newCenters = tmp ;
    }
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*                                  Reassign points to centers  */
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /*
     Update distances between centers.
     */
    numDistanceComputationsToRefreshCenterDistances
      += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ;
    for (c = 0 ; c < self->numCenters ; ++c) {
      nextCenterDistances[c] = (TYPE) VL_INFINITY_D ;
      for (j = 0 ; j < self->numCenters ; ++j) {
        if (j == c) continue ;
        nextCenterDistances[c] = VL_MIN(nextCenterDistances[c],
                                        ((TYPE*)self->centerDistances)
                                        [j + c * self->numCenters]) ;
      }
    }
    /*
     Update upper bounds on point-to-closest-center distances
     based on the center variation.
     */
    for (x = 0 ; x < (signed)numData ; ++x) {
      TYPE a = pointToClosestCenterUB[x] ;
      TYPE b = centerToNewCenterDistances[assignments[x]] ;
      if (self->distance == VlDistanceL1) {
        pointToClosestCenterUB[x] = a + b ;
      } else {
        /* stored quantities are squared distances here: the bound is
           (sqrt(a) + sqrt(b))^2 = a + b + 2*sqrt(a*b) */
#if (FLT == VL_TYPE_FLOAT)
        TYPE sqrtab =  sqrtf (a * b) ;
#else
        TYPE sqrtab =  sqrt (a * b) ;
#endif
        pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ;
      }
      pointToClosestCenterUBIsStrict[x] = VL_FALSE ;
    }
    /*
     Update lower bounds on point-to-center distances
     based on the center variation.
     */
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++x) {
      for (c = 0 ; c < self->numCenters ; ++c) {
        TYPE a = pointToCenterLB[c + x * self->numCenters] ;
        TYPE b = centerToNewCenterDistances[c] ;
        if (a < b) {
          pointToCenterLB[c + x * self->numCenters] = 0 ;
        } else {
          if (self->distance == VlDistanceL1) {
            pointToCenterLB[c + x * self->numCenters]  = a - b ;
          } else {
            /* (sqrt(a) - sqrt(b))^2 = a + b - 2*sqrt(a*b) */
#if (FLT == VL_TYPE_FLOAT)
            TYPE sqrtab =  sqrtf (a * b) ;
#else
            TYPE sqrtab =  sqrt (a * b) ;
#endif
            pointToCenterLB[c + x * self->numCenters]  = a + b - 2.0 * sqrtab ;
          }
        }
      }
    }
#ifdef SANITY
    {
      int xx ;
      int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies before assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                   cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
                                 cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif
    /*
     Scan the data and do the reassignments. Use the bounds to
     skip as many point-to-center distance calculations as possible.
     */
    allDone = VL_TRUE ;
    /* NOTE(review): allDone is written (only ever to VL_FALSE) by
       multiple threads without synchronization; presumably treated as a
       benign race -- confirm this is acceptable for the target
       platforms */
#if defined(_OPENMP)
#pragma omp parallel for \
            default(none) \
            shared(self,numData, \
              pointToClosestCenterUB,pointToCenterLB, \
              nextCenterDistances,pointToClosestCenterUBIsStrict, \
              assignments,data,distFn,allDone) \
            private(c,x) \
            reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \
            num_threads(vl_get_max_threads())
#endif
    for (x = 0 ; x < (signed)numData ; ++ x) {
      /*
       A point x sticks with its current center assignments[x]
       if the UB to d(x, c[assignments[x]]) is not larger than half
       the distance of c[assignments[x]] to any other center c.
       */
      if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
          pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) {
        continue ;
      }
      for (c = 0 ; c < self->numCenters ; ++c) {
        vl_uint32 cx = assignments[x] ;
        TYPE distance ;
        /* The point is not reassigned to a given center c
         if either:
         0 - c is already the assigned center
         1 - The UB of d(x, c[assignments[x]]) is smaller than half
         the distance of c[assignments[x]] to c, OR
         2 - The UB of d(x, c[assignments[x]]) is smaller than the
         LB of the distance of x to c.
         */
        if (cx == c) {
          continue ;
        }
        if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
            pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
            [c + cx * self->numCenters]) {
          continue ;
        }
        if (pointToClosestCenterUB[x] <= pointToCenterLB
            [c + x * self->numCenters]) {
          continue ;
        }
        /* If the UB is loose, try recomputing it and test again */
        if (! pointToClosestCenterUBIsStrict[x]) {
          distance = distFn(self->dimension,
                            data + self->dimension * x,
                            (TYPE*)self->centers + self->dimension * cx) ;
          pointToClosestCenterUB[x] = distance ;
          pointToClosestCenterUBIsStrict[x] = VL_TRUE ;
          pointToCenterLB[cx + x * self->numCenters] = distance ;
          numDistanceComputationsToRefreshUB += 1 ;
          if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) *
              pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances)
              [c + cx * self->numCenters]) {
            continue ;
          }
          if (pointToClosestCenterUB[x] <= pointToCenterLB
              [c + x * self->numCenters]) {
            continue ;
          }
        }
        /*
         Now the UB is strict (equal to d(x, assignments[x])), but
         we still could not exclude that x should be reassigned to
         c. We therefore compute the distance, update the LB,
         and check if a reassignment must be made
         */
        distance = distFn(self->dimension,
                          data + x * self->dimension,
                          (TYPE*)self->centers + c *  self->dimension) ;
        numDistanceComputationsToRefreshLB += 1 ;
        pointToCenterLB[c + x * self->numCenters] = distance ;
        if (distance < pointToClosestCenterUB[x]) {
          assignments[x] = c ;
          pointToClosestCenterUB[x] = distance ;
          allDone = VL_FALSE ;
          /* the UB strict flag is already set here */
        }
      } /* assign center */
    } /* next data point */
    totDistanceComputationsToRefreshUB
      += numDistanceComputationsToRefreshUB ;
    totDistanceComputationsToRefreshLB
      += numDistanceComputationsToRefreshLB ;
    totDistanceComputationsToRefreshCenterDistances
      += numDistanceComputationsToRefreshCenterDistances ;
    totDistanceComputationsToNewCenters
      += numDistanceComputationsToNewCenters ;
    totNumRestartedCenters
      += numRestartedCenters ;
#ifdef SANITY
    {
      int xx ;
      int cc ;
      TYPE tol = 1e-5 ;
      VL_PRINTF("inconsistencies after assignments:\n");
      for (xx = 0 ; xx < numData ; ++xx) {
        for (cc = 0 ; cc < self->numCenters ; ++cc) {
          TYPE a = pointToCenterLB[cc + xx * self->numCenters] ;
          TYPE b = distFn(self->dimension,
                          data + self->dimension * xx,
                          (TYPE*)self->centers + self->dimension * cc) ;
          if (cc == assignments[xx]) {
            TYPE z = pointToClosestCenterUB[xx] ;
            if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n",
                                   cc, xx, z, b) ;
          }
          if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n",
                                 cc, xx, a, b, assignments[xx]) ;
        }
      }
    }
#endif
    /* compute UB on energy */
    energy = 0 ;
    for (x = 0 ; x < (signed)numData ; ++x) {
      energy += pointToClosestCenterUB[x] ;
    }
    if (self->verbosity) {
      vl_size numDistanceComputations =
        numDistanceComputationsToRefreshUB +
        numDistanceComputationsToRefreshLB +
        numDistanceComputationsToRefreshCenterDistances +
        numDistanceComputationsToNewCenters ;
      VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n",
                iteration,
                energy,
                numDistanceComputations) ;
      if (numRestartedCenters) {
        /* fixed: a stray energy argument used to be passed here,
           mismatching the two-specifier format string (UB) and
           printing garbage instead of the restart count */
        VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n",
                  iteration,
                  numRestartedCenters) ;
      }
      if (self->verbosity > 1) {
        VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. per type: "
                  "UB: %.1f%% (%d), LB: %.1f%% (%d), "
                  "intra_center: %.1f%% (%d), "
                  "new_center: %.1f%% (%d)\n",
                  iteration,
                  100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations,
                  numDistanceComputationsToRefreshUB,
                  100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations,
                  numDistanceComputationsToRefreshLB,
                  100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations,
                  numDistanceComputationsToRefreshCenterDistances,
                  100.0 * numDistanceComputationsToNewCenters / numDistanceComputations,
                  numDistanceComputationsToNewCenters) ;
      }
    }
    /* check termination conditions */
    if (iteration >= self->maxNumIterations) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ;
      }
      break ;
    }
    if (allDone) {
      if (self->verbosity) {
        VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ;
      }
      break ;
    }
  } /* next Elkan iteration */
  /* compute true energy */
  energy = 0 ;
  for (x = 0 ; x < (signed)numData ; ++ x) {
    vl_uindex cx = assignments [x] ;
    energy += distFn(self->dimension,
                     data + self->dimension * x,
                     (TYPE*)self->centers + self->dimension * cx) ;
    totDistanceComputationsToFinalize += 1 ;
  }
  {
    vl_size totDistanceComputations =
      totDistanceComputationsToInit +
      totDistanceComputationsToRefreshUB +
      totDistanceComputationsToRefreshLB +
      totDistanceComputationsToRefreshCenterDistances +
      totDistanceComputationsToNewCenters +
      totDistanceComputationsToFinalize ;
    double saving = (double)totDistanceComputations
      / (iteration * self->numCenters * numData) ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n",
                totDistanceComputations, saving * 100.0) ;
      if (totNumRestartedCenters) {
        VL_PRINTF("kmeans: Elkan: there have been %d restarts\n",
                  totNumRestartedCenters) ;
      }
    }
    if (self->verbosity > 1) {
      VL_PRINTF("kmeans: Elkan: total dist. calc. per type: "
                "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), "
                "intra_center: %.1f%% (%d), "
                "new_center: %.1f%% (%d), "
                "finalize: %.1f%% (%d)\n",
                100.0 * totDistanceComputationsToInit / totDistanceComputations,
                totDistanceComputationsToInit,
                100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations,
                totDistanceComputationsToRefreshUB,
                100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations,
                totDistanceComputationsToRefreshLB,
                100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations,
                totDistanceComputationsToRefreshCenterDistances,
                100.0 * totDistanceComputationsToNewCenters / totDistanceComputations,
                totDistanceComputationsToNewCenters,
                100.0 * totDistanceComputationsToFinalize / totDistanceComputations,
                totDistanceComputationsToFinalize) ;
    }
  }
  if (permutations) {
    vl_free(permutations) ;
  }
  if (numSeenSoFar) {
    vl_free(numSeenSoFar) ;
  }
  vl_free(distances) ;
  vl_free(assignments) ;
  vl_free(clusterMasses) ;
  vl_free(nextCenterDistances) ;
  vl_free(pointToClosestCenterUB) ;
  vl_free(pointToClosestCenterUBIsStrict) ;
  vl_free(pointToCenterLB) ;
  vl_free(newCenters) ;
  vl_free(centerToNewCenterDistances) ;
  return energy ;
}
/* ---------------------------------------------------------------- */
/* Dispatch center refinement to the procedure selected by the
   algorithm field (Lloyd, Elkan, or ANN) and return its final
   energy. */
static double
VL_XCAT(_vl_kmeans_refine_centers_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size numData)
{
  if (self->algorithm == VlKMeansLloyd) {
    return VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ;
  }
  if (self->algorithm == VlKMeansElkan) {
    return VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ;
  }
  if (self->algorithm == VlKMeansANN) {
    return VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ;
  }
  abort() ;
}
/* VL_KMEANS_INSTANTIATING */
#else
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif
/* VL_KMEANS_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING
/** ------------------------------------------------------------------
** @brief Set centers
** @param self KMeans object.
** @param centers centers to copy.
** @param dimension data dimension.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_set_centers
(VlKMeans * self,
 void const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  /* discard any previous state, then copy the centers using the
     implementation matching the object's data type */
  vl_kmeans_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_set_centers_f
    (self, (float const *)centers, dimension, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_set_centers_d
    (self, (double const *)centers, dimension, numCenters) ;
  } else {
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief init centers by randomly sampling data
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
 ** @param numData number of data points.
** @param numCenters number of centers.
**
** The function inits the KMeans centers by randomly sampling
** the data @a data.
**/
VL_EXPORT void
vl_kmeans_init_centers_with_rand_data
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* discard any previous state, then seed the centers by random
     sampling, using the implementation matching the data type */
  vl_kmeans_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_init_centers_with_rand_data_f
    (self, (float const *)data, dimension, numData, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_init_centers_with_rand_data_d
    (self, (double const *)data, dimension, numData, numCenters) ;
  } else {
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Seed centers by the KMeans++ algorithm
** @param self KMeans object.
** @param data data to sample from.
** @param dimension data dimension.
 ** @param numData number of data points.
** @param numCenters number of centers.
**/
VL_EXPORT void
vl_kmeans_init_centers_plus_plus
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  /* discard any previous state, then seed the centers with the
     KMeans++ procedure matching the object's data type */
  vl_kmeans_reset (self) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_init_centers_plus_plus_f
    (self, (float const *)data, dimension, numData, numCenters) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_init_centers_plus_plus_d
    (self, (double const *)data, dimension, numData, numCenters) ;
  } else {
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Quantize data
** @param self KMeans object.
** @param assignments data to closest center assignments (output).
** @param distances data to closest center distance (output).
** @param data data to quantize.
** @param numData number of data points to quantize.
**/
VL_EXPORT void
vl_kmeans_quantize
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData)
{
  /* forward to the quantizer matching the object's data type */
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_quantize_f
    (self, assignments, distances, (float const *)data, numData) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_quantize_d
    (self, assignments, distances, (double const *)data, numData) ;
  } else {
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Quantize data using approximate nearest neighbours (ANN).
** @param self KMeans object.
** @param assignments data to centers assignments (output).
 ** @param distances data to closest center distance (output).
 ** @param data data to quantize.
 ** @param numData number of data points.
 ** @param update choose whether to update current assignments.
 **
 ** The function uses an ANN procedure to compute the approximate
 ** nearest neighbours of the input data point.
 **
 ** Setting @a update to ::VL_TRUE will cause the algorithm
 ** to *update existing assignments*. This means that each
 ** element of @a assignments and @a distances is updated only if the
 ** ANN procedure can find a better assignment than the existing one.
**/
VL_EXPORT void
vl_kmeans_quantize_ann
(VlKMeans * self,
 vl_uint32 * assignments,
 void * distances,
 void const * data,
 vl_size numData,
 vl_bool update)
{
  /* Dispatch to the ANN implementation matching the object's data type. */
  if (self->dataType == VL_TYPE_FLOAT) {
    _vl_kmeans_quantize_ann_f
    (self, assignments, distances, (float const *)data, numData, update) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    _vl_kmeans_quantize_ann_d
    (self, assignments, distances, (double const *)data, numData, update) ;
  } else {
    /* Unsupported data type: programming error. */
    abort() ;
  }
}
/** ------------------------------------------------------------------
** @brief Refine center locations.
** @param self KMeans object.
** @param data data to quantize.
** @param numData number of data points.
** @return K-means energy at the end of optimization.
**
** The function calls the underlying K-means quantization algorithm
** (@ref VlKMeansAlgorithm) to quantize the specified data @a data.
** The function assumes that the cluster centers have already
** been assigned by using one of the seeding functions, or by
** setting them.
**/
VL_EXPORT double
vl_kmeans_refine_centers
(VlKMeans * self,
 void const * data,
 vl_size numData)
{
  double energy ;

  /* Centers must have been seeded (or set) before refinement. */
  assert (self->centers) ;
  if (self->dataType == VL_TYPE_FLOAT) {
    energy = _vl_kmeans_refine_centers_f
    (self, (float const *)data, numData) ;
  } else if (self->dataType == VL_TYPE_DOUBLE) {
    energy = _vl_kmeans_refine_centers_d
    (self, (double const *)data, numData) ;
  } else {
    /* Unsupported data type: programming error. */
    abort() ;
  }
  return energy ;
}
/** ------------------------------------------------------------------
** @brief Cluster data.
** @param self KMeans object.
** @param data data to quantize.
** @param dimension data dimension.
** @param numData number of data points.
** @param numCenters number of clusters.
** @return K-means energy at the end of optimization.
**
** The function initializes the centers by using the initialization
** algorithm set by ::vl_kmeans_set_initialization and refines them
** by the quantization algorithm set by ::vl_kmeans_set_algorithm.
** The process is repeated one or more times (see
 ** ::vl_kmeans_set_num_repetitions) and the result with smaller
** energy is retained.
**/
VL_EXPORT double
vl_kmeans_cluster (VlKMeans * self,
                   void const * data,
                   vl_size dimension,
                   vl_size numData,
                   vl_size numCenters)
{
  vl_uindex repetition ;
  double bestEnergy = VL_INFINITY_D ;
  /* Scratch buffer holding the best centers found across repetitions;
     ownership alternates with self->centers via pointer swaps below. */
  void * bestCenters = NULL ;
  for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) {
    double energy ;
    double timeRef ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ;
    }
    /* Seed the centers with the configured initialization method. */
    timeRef = vl_get_cpu_time() ;
    switch (self->initialization) {
      case VlKMeansRandomSelection :
        vl_kmeans_init_centers_with_rand_data (self,
                                               data, dimension, numData,
                                               numCenters) ;
        break ;
      case VlKMeansPlusPlus :
        vl_kmeans_init_centers_plus_plus (self,
                                          data, dimension, numData,
                                          numCenters) ;
        break ;
      default:
        abort() ;
    }
    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means initialized in %.2f s\n",
                vl_get_cpu_time() - timeRef) ;
    }
    /* Refine the seeded centers and measure the resulting energy. */
    timeRef = vl_get_cpu_time () ;
    energy = vl_kmeans_refine_centers (self, data, numData) ;
    if (self->verbosity) {
      VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n",
                vl_get_cpu_time() - timeRef, energy) ;
    }
    /* copy centers to output if current solution is optimal */
    /* check repetition == 0 as well in case energy = NaN, which */
    /* can happen if the data contain NaNs */
    if (energy < bestEnergy || repetition == 0) {
      void * temp ;
      bestEnergy = energy ;
      if (bestCenters == NULL) {
        /* First improvement: allocate the spare buffer once. */
        bestCenters = vl_malloc(vl_get_type_size(self->dataType) *
                                self->dimension *
                                self->numCenters) ;
      }
      /* swap buffers: bestCenters takes ownership of the current
         (best-so-far) centers; self->centers gets the spare buffer,
         to be overwritten by the next repetition's initialization. */
      temp = bestCenters ;
      bestCenters = self->centers ;
      self->centers = temp ;
    } /* better energy */
  } /* next repetition */
  /* Install the best centers found; free the leftover spare buffer. */
  vl_free (self->centers) ;
  self->centers = bestCenters ;
  return bestEnergy ;
}
/* VL_KMEANS_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_KMEANS_INSTANTIATING
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/opencl-private.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/threshold.h"
#ifdef MAGICKCORE_CLPERFMARKER
#include "CLPerfMarker.h"
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveBlurImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively blur all default channels.
  */
  return(AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near-)zero sigma means no blurring: return the unmodified clone.
  */
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge image selects, per pixel, how much blur to apply.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices of kernel[] are populated; kernel[i] is a
    (width-i) x (width-i) Gaussian.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* 2-D Gaussian weight at offset (u,v). */
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Fold any normalization residue into the center tap. */
    kernel[i][(k-1)/2]+=(1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed part-way: release what was acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image: strong edges select small (weak) kernels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    IndexPacket
      *magick_restrict blur_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      const double
        *magick_restrict k;

      ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Select the kernel index from the edge strength: higher intensity
        in the edge image yields a larger i, i.e. a smaller kernel.
      */
      i=CastDoubleToLong(ceil((double) width*QuantumScale*
        GetPixelIntensity(edge_image,r)-0.5));
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;  /* only even kernel indices are populated */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel[i];
      /* Convolve the selected neighborhood, alpha-weighting color taps. */
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* Normalize by the accumulated (alpha-weighted) kernel mass. */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveSharpenImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively sharpen all default channels.
  */
  return(AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near-)zero sigma means no sharpening: return the unmodified clone.
  */
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The resulting edge image selects, per pixel, how much to sharpen.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices of kernel[] are populated; kernel[i] is a
    (width-i) x (width-i) sharpening (negative-Gaussian) kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* Negated 2-D Gaussian weight at offset (u,v). */
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Center tap balances the negative surround (sharpening kernel). */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed part-way: release what was acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image: strong edges select large (strong) kernels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    IndexPacket
      *magick_restrict sharp_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      const double
        *magick_restrict k;

      ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Select the kernel index from the inverted edge strength: higher
        intensity in the edge image yields a smaller i, i.e. a larger
        kernel (note the 1.0- term, opposite of adaptive blur).
      */
      i=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;  /* only even kernel indices are populated */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      k=kernel[i];
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      /* Convolve the selected neighborhood, alpha-weighting color taps. */
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* Normalize by the accumulated (alpha-weighted) kernel mass. */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *BlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: blur all default channels.
  */
  return(BlurImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    geometry[MaxTextExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image = NULL;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /*
    Separable Gaussian: a horizontal 1-D blur pass followed by the same
    kernel rotated 90 degrees, applied via the morphology machinery.
  */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const size_t order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const size_t order,const double *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o order: the number of columns and rows in the filter kernel.
%
% o kernel: An array of double representing the convolution kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  Image
    *result;

  /*
    Convolve all default channels, bracketing the work with AMD
    performance markers when they are enabled at build time.
  */
#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  result=ConvolveImageChannel(image,DefaultChannels,order,kernel,exception);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(result);
}
MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const size_t order,const double *kernel,
  ExceptionInfo *exception)
{
  Image
    *convolve_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  /*
    Wrap the caller's order x order coefficient array in a KernelInfo.
  */
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=order;
  kernel_info->height=order;
  /* Center the kernel origin. */
  kernel_info->x=(ssize_t) (order-1)/2;
  kernel_info->y=(ssize_t) (order-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (order*order); i++)
    kernel_info->values[i]=kernel[i];
  convolve_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL path first; fall back to morphology on failure. */
  convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
    exception);
#endif
  if (convolve_image == (Image *) NULL)
    convolve_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while perserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one step of the Crimmins complementary-hulling operator
  on a single channel.  f and g are (columns+2) x (rows+2) buffers with a
  one-pixel border around the image data.  r addresses the neighbor at
  (x_offset,y_offset).  With positive polarity, pixels are raised one step
  when the neighbor is sufficiently brighter; with non-positive polarity
  they are lowered when the neighbor is sufficiently darker.  The first
  pass writes into g; the second pass compares against both the neighbor
  and its mirror (s) and folds the result back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row of the padded buffers. */
  p=f+(columns+2);
  q=g+(columns+2);
  /* r: the neighbor pixel offset by (x_offset,y_offset). */
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    SignedQuantum
      v;

    /* Index of the first interior pixel of row y: y*(columns+2)+1. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        /* Raise the pixel a step when the neighbor is >= 2 steps above. */
        if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        /* Lower the pixel a step when the neighbor is <= 2 steps below. */
        if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: read from g; compare against the neighbor (r) and its
    mirror (s), then write the result back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    SignedQuantum
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  ssize_t
    i;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  size_t
    length,
    number_channels;

  /*
    Scan directions for the hull passes below; each pair (X[k],Y[k]) is also
    applied negated, covering eight directions in total.
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated implementation when it is available. */
  despeckle_image=AccelerateDespeckleImage(image, exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  The (columns+2)*(rows+2) size leaves a one-pixel
    zero border around the channel data for the hull algorithm.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, processing one channel at a time.
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    /* Channel 3 is opacity; skip it when the image carries no alpha. */
    if ((image->matte == MagickFalse) && (i == 3))
      continue;
    /*
      Copy channel i into the bordered buffer; j indexes the bordered layout
      and the pre/post increments around each row skip the border columns.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const IndexPacket
        *magick_restrict indexes;

      const PixelPacket
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixels[j]=GetPixelRed(p); break;
          case 1: pixels[j]=GetPixelGreen(p); break;
          case 2: pixels[j]=GetPixelBlue(p); break;
          case 3: pixels[j]=GetPixelOpacity(p); break;
          case 4: pixels[j]=GetPixelBlack(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    /*
      Despeckle: for each direction run hull passes with positive polarity,
      then again with negative polarity, over the direction and its mirror.
    */
    (void) memset(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the filtered channel back into the despeckled image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      IndexPacket
        *magick_restrict indexes;

      PixelPacket
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetPixelRed(q,pixels[j]); break;
          case 1: SetPixelGreen(q,pixels[j]); break;
          case 2: SetPixelBlue(q,pixels[j]); break;
          case 3: SetPixelOpacity(q,pixels[j]); break;
          case 4: SetPixelIndex(indexes+x,pixels[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel;

  size_t
    kernel_width;

  ssize_t
    n;

  /*
    Detect edges by convolving with a Laplacian-style kernel: every weight is
    -1 except a dominant center weight of width*height-1.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  kernel_width=GetOptimalKernelWidth1D(radius,0.5);
  kernel=AcquireKernelInfo((const char *) NULL);
  if (kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->width=kernel_width;
  kernel->height=kernel_width;
  kernel->x=(ssize_t) (kernel->width-1)/2;
  kernel->y=(ssize_t) (kernel->height-1)/2;
  kernel->signature=MagickCoreSignature;
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    {
      kernel=DestroyKernelInfo(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (n=0; n < (ssize_t) (kernel->width*kernel->height); n++)
    kernel->values[n]=(-1.0);
  /* Center weight balances the surrounding -1 weights. */
  kernel->values[(kernel->width*kernel->height)/2]=(double)
    kernel->width*kernel->height-1.0;
  edge_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel,
    exception);
#endif
  if (edge_image == (Image *) NULL)
    edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,
      1,kernel,exception);
  kernel=DestroyKernelInfo(kernel);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  /*
    Build the emboss kernel and convolve; the result is then equalized to
    strengthen the three-dimensional effect.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with Gaussian weights signed by position (negative when
    u or v is negative, positive otherwise).
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      /*
        Keep only the element in column k of each row; k decreases by one per
        row, so the surviving weights form a single diagonal of the kernel.
      */
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the weights sum to one; PerceptibleReciprocal guards
    against a vanishing sum.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (emboss_image == (Image *) NULL)
    emboss_image=MorphologyImageChannel(image,DefaultChannels,
      ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Equalize all channels except the sync meta-channel. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImageChannel(emboss_image,(ChannelType)
      (AllChannels &~ SyncChannels));
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FilterImage() applies a custom convolution kernel to the image.
%
% The format of the FilterImage method is:
%
% Image *FilterImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
% Image *FilterImageChannel(const Image *image,const ChannelType channel,
% const KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the custom kernel to the default channel set.
  */
  return(FilterImageChannel(image,DefaultChannels,kernel,exception));
}
MagickExport Image *FilterImageChannel(const Image *image,
  const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag "Filter/Image"

  CacheView
    *filter_view,
    *image_view;

  Image
    *filter_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    *filter_kernel;

  ssize_t
    i;

  ssize_t
    y;

#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /*
    Initialize filter image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((kernel->width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  if (image->debug != MagickFalse)
    {
      /*
        Trace the kernel, one formatted row per log line.
      */
      char
        format[MaxTextExtent],
        *message;

      const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double)
        kernel->height);
      message=AcquireString("");
      k=kernel->values;
      for (v=0; v < (ssize_t) kernel->height; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) kernel->width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated convolution when it is available. */
  filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception);
  if (filter_image != (Image *) NULL)
    {
#ifdef MAGICKCORE_CLPERFMARKER
      clEndPerfMarkerAMD();
#endif
      return(filter_image);
    }
#endif
  filter_image=CloneImage(image,0,0,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&filter_image->exception);
      filter_image=DestroyImage(filter_image);
      return((Image *) NULL);
    }
  /*
    Normalize kernel.  Copy the kernel values into an aligned MagickRealType
    buffer for the convolution loops below.
  */
  filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*filter_kernel)));
  if (filter_kernel == (MagickRealType *) NULL)
    {
      filter_image=DestroyImage(filter_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    filter_kernel[i]=(MagickRealType) kernel->values[i];
  /*
    Filter image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  filter_view=AcquireAuthenticCacheView(filter_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,filter_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    IndexPacket
      *magick_restrict filter_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a source window tall and wide enough for the kernel footprint,
      centered on row y.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y-
      (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        pixel;

      const MagickRealType
        *magick_restrict k;

      const PixelPacket
        *magick_restrict kernel_pixels;

      ssize_t
        u;

      ssize_t
        v;

      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=filter_kernel;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: accumulate raw channel values with no alpha
            weighting.  NOTE(review): v iterates over kernel->width and u
            over kernel->height — confirm this is intentional; it is only
            equivalent to the usual orientation for square kernels.
          */
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              const IndexPacket
                *magick_restrict kernel_indexes;

              k=filter_kernel;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          /*
            Matte path: weight color contributions by each tap's alpha and
            renormalize by the accumulated alpha (gamma).
          */
          double
            alpha,
            gamma;

          gamma=0.0;
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                GetPixelOpacity(kernel_pixels+u)));
              pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u);
              pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u);
              pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u);
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          gamma=PerceptibleReciprocal(gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              /* Opacity is accumulated unweighted (no alpha factor). */
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              const IndexPacket
                *magick_restrict kernel_indexes;

              k=filter_kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
                kernel_indexes+=image->columns+kernel->width;
              }
              /* gamma here is the value computed in the color loop above. */
              SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(filter_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FilterImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  filter_image->type=image->type;
  filter_view=DestroyCacheView(filter_view);
  image_view=DestroyCacheView(image_view);
  filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel);
  if (status == MagickFalse)
    filter_image=DestroyImage(filter_image);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(filter_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *GaussianBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: Gaussian-blur the default channel set.
  */
  return(GaussianBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *GaussianBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    geometry[MaxTextExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Blur the selected channels by convolving with a Gaussian kernel built
    from the radius/sigma geometry string.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  gaussian_kernel=AcquireKernelInfo(geometry);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateConvolveImageChannel(image,channel,gaussian_kernel,
    exception);
#endif
  if (blur_image == (Image *) NULL)
    blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
% Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    sum;

  ssize_t
    i;

  /*
    Generate a normalized 1-D Gaussian half-kernel for motion blur: sample
    the Gaussian at i=0..width-1, then scale so the weights sum to one.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double *) NULL)
    return((double *) NULL);
  sum=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=sum;
  return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: motion-blur the default channel set.
  */
  return(MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
    exception));
}
MagickExport Image *MotionBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  OffsetInfo
    *offset;

  PointInfo
    point;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  /*
    Simulate motion blur: each destination pixel is the weighted sum of
    source pixels sampled along the direction given by angle, using a 1-D
    Gaussian kernel sized by GetOptimalKernelWidth1D(radius,sigma).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* Consistency fix: sibling entry points also validate the signature. */
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the integer sampling offsets along the blur direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=CastDoubleToLong(ceil((double) (i*point.y)/
      hypot(point.x,point.y)-0.5));
    offset[i].y=CastDoubleToLong(ceil((double) (i*point.x)/
      hypot(point.x,point.y)-0.5));
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset,
    exception);
  if (blur_image != (Image *) NULL)
    {
      /*
        Bug fix: this accelerated path previously returned without releasing
        the kernel and offset buffers, leaking both allocations.
      */
      kernel=(double *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict blur_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      PixelPacket
        pixel;

      const IndexPacket
        *magick_restrict indexes;

      double
        *magick_restrict k;

      ssize_t
        j;  /* renamed from i: no longer shadows the outer loop index */

      k=kernel;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: accumulate raw channel values along the blur line.
          */
          for (j=0; j < (ssize_t) width; j++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[j].x,y+
              offset[j].y,&pixel,exception);
            qixel.red+=(*k)*pixel.red;
            qixel.green+=(*k)*pixel.green;
            qixel.blue+=(*k)*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                /*
                  NOTE(review): only the first entry of the virtual index
                  queue is read for every tap — confirm this matches the
                  intended CMYK black-channel sampling.
                */
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*(*indexes);
              }
            k++;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index));
        }
      else
        {
          /*
            Matte path: weight color contributions by each tap's alpha and
            renormalize by the accumulated alpha (gamma).  Opacity itself is
            accumulated unweighted.
          */
          double
            alpha,
            gamma;

          alpha=0.0;
          gamma=0.0;
          for (j=0; j < (ssize_t) width; j++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[j].x,y+
              offset[j].y,&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=(*k)*alpha*pixel.red;
            qixel.green+=(*k)*alpha*pixel.green;
            qixel.blue+=(*k)*alpha*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*alpha*GetPixelIndex(indexes);
              }
            gamma+=(*k)*alpha;
            k++;
          }
          gamma=PerceptibleReciprocal(gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *KuwaharaImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: Kuwahara-filter the default channel set.
  */
  return(KuwaharaImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *KuwaharaImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
/* Fixed typo: tag read "Kiwahara/Image"; it is user-visible via the progress
   monitor and must match the method family name. */
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) channel;  /* channel selection is not honored by this filter */
  width=(size_t) radius+1;  /* side length of each sampling quadrant */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&kuwahara_image->exception);
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,kuwahara_image->rows,1)
#endif
  for (y=0; y < (ssize_t) kuwahara_image->rows; y++)
  {
    IndexPacket
      *magick_restrict kuwahara_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view);
    for (x=0; x < (ssize_t) kuwahara_image->columns; x++)
    {
      double
        min_variance;

      MagickPixelPacket
        pixel;

      RectangleInfo
        quadrant,
        target;

      ssize_t
        i;

      /*
        Examine the four width x width quadrants that share this pixel as a
        corner and remember the one with the smallest luma variance.
      */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const PixelPacket
          *magick_restrict p;

        double
          variance;

        MagickPixelPacket
          mean;

        const PixelPacket
          *magick_restrict k;

        ssize_t
          n;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const PixelPacket *) NULL)
          break;
        /*
          Mean RGB of the quadrant, then the variance of its luma about that
          mean.
        */
        GetMagickPixelPacket(image,&mean);
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          mean.red+=(double) k->red;
          mean.green+=(double) k->green;
          mean.blue+=(double) k->blue;
          k++;
        }
        mean.red/=(double) (width*width);
        mean.green/=(double) (width*width);
        mean.blue/=(double) (width*width);
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(image,k);
          variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean));
          k++;
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* quadrant fetch failed above; abort this row */
          status=MagickFalse;
          break;
        }
      /*
        Output pixel is the (interpolated) center of the least-variant
        quadrant of the blurred image.
      */
      status=InterpolateMagickPixelPacket(gaussian_image,image_view,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,
        (double) target.y+target.height/2.0,&pixel,exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,    /* rows x (columns+2*width) luma buffer between passes */
    *scanline,      /* per-thread padded scanline workspace */
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *interImage_info,
    *scanline_info;

  ssize_t
    scanLineSize,
    width;          /* half-width of the triangular blur window */

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL implementation when it is available and succeeds. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&contrast_image->exception);
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /* radius is a percentage: 100% maps to 20% of the largest dimension. */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanline_info=AcquireVirtualMemory(GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /* Normalizer for the triangular weights accumulated per window. */
  totalWeight=(width+1)*(width+1);
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Each thread works in its own slice of the scanline workspace. */
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Fetch the column with width rows of virtual padding on both ends. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p++;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular weighting: ascend 1..width, then descend. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        /* NOTE(review): appears to reflect edge results into the left/right
           padding columns of interImage for the horizontal pass — confirm */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *pix,
        *pixels;

      PixelPacket
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
        exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Pull the vertically-blurred padded row produced by the first pass. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        /* Unsharp-mask style: scale each channel by the ratio of boosted to
           original luma.  NOTE(review): srcVal of 0 would divide by zero —
           presumably black pixels are rare enough upstream; verify. */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(q,ClampToQuantum((MagickRealType) GetPixelRed(p)*mult));
        SetPixelGreen(q,ClampToQuantum((MagickRealType) GetPixelGreen(p)*mult));
        SetPixelBlue(q,ClampToQuantum((MagickRealType) GetPixelBlue(p)*mult));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters.  This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"

  char
    factor[MaxTextExtent],
    label[MaxTextExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Seed the parameter sweep; each tile advances these further below. */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  /*
    Build one thumbnail per tile, applying the requested operation with a
    progressively stronger parameter on each iteration.
  */
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
    if (i == (NumberTiles/2))
      {
        /* Center tile shows the unmodified image for comparison. */
        (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        /* GammaPreview doubles as the fallback for unknown preview types. */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImageChannel(preview_image,DefaultChannels,gamma);
        (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse);
        (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        /* Apply despeckle i times total; ownership of thumbnail is rotated
           through preview_image on each pass. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /* i == 4 is the untouched center tile, so that case never reaches
           this switch. */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MaxTextExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MaxTextExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MaxTextExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MaxTextExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"poisson",MaxTextExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /* NOTE(review): BilevelImage is applied to thumbnail, not to the
           clone shown in the tile — confirm this is the intended behavior. */
        (void) BilevelImage(thumbnail,
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"threshold %g",
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*
          percentage/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g",
          degrees,degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold);
        (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g",
          0.5*degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MaxTextExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /* Round-trip through a temporary JPEG file at this tile's quality to
           visualize compression artifacts. */
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MaxTextExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MaxTextExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        /* Label with the compressed size in mb/kb/bytes. */
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MaxTextExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a rotational blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RotationalBlurImageChannel(const Image *image,
% const ChannelType channel,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the rotational blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: rotationally blur the default channels.
  */
  return(RotationalBlurImageChannel(image,DefaultChannels,angle,exception));
}
MagickExport Image *RotationalBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    blur_radius,
    *cos_theta,   /* precomputed cosines for the sampled arc angles */
    offset,
    *sin_theta,   /* precomputed sines for the sampled arc angles */
    theta;

  PointInfo
    blur_center;

  ssize_t
    i;

  size_t
    n;            /* number of arc samples, scaled by angle and radius */

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL implementation when it is available and succeeds. */
  blur_image=AccelerateRadialBlurImage(image,channel,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
  /*
    Precompute the sin/cos tables for the arc samples.
  */
  cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (MagickRealType *) NULL) ||
      (sin_theta == (MagickRealType *) NULL))
    {
      if (cos_theta != (MagickRealType *) NULL)
        cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (MagickRealType *) NULL)
        sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Center the sampled arc on the source pixel's angle. */
  offset=theta*(MagickRealType) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    IndexPacket
      *magick_restrict blur_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      MagickRealType
        normalize,
        radius;

      PixelPacket
        pixel;

      PointInfo
        center;

      ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /* Pixels near the center need fewer samples; stride through the
         precomputed tables accordingly. */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      normalize=0.0;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: accumulate samples along the arc and average.
          */
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            qixel.red+=pixel.red;
            qixel.green+=pixel.green;
            qixel.blue+=pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*indexes);
              }
            normalize+=1.0;
          }
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(normalize*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(normalize*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index));
        }
      else
        {
          double
            alpha,
            gamma;

          /*
            Matte path: alpha-weight color samples so transparent pixels do
            not bleed color into the average.
          */
          alpha=1.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=alpha*pixel.red;
            qixel.green+=alpha*pixel.green;
            qixel.blue+=alpha*pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=alpha*(*indexes);
              }
            gamma+=alpha;
            normalize+=1.0;
          }
          gamma=PerceptibleReciprocal(gamma);
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
  sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blurs pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
% Image *SelectiveBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: selectively blur the default channels.
  */
  return(SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    threshold,exception));
}
/*
  SelectiveBlurImageChannel(): selectively blur the given channels.  A
  (width x width) Gaussian kernel is applied at each pixel, but a neighbor
  contributes only when its grayscale intensity differs from the center
  pixel's intensity by less than 'threshold'; the accumulated kernel weight
  ('gamma') renormalizes the result.  Returns a new image, or NULL on
  failure (details in 'exception').
*/
MagickExport Image *SelectiveBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"
CacheView
*blur_view,
*image_view,
*luminance_view;
double
*kernel;
Image
*blur_image,
*luminance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
ssize_t
i;
size_t
width;
ssize_t
center,
j,
u,
v,
y;
/*
Initialize blur image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Build the width x width Gaussian kernel, stored row-major in 'kernel'.
*/
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
width*sizeof(*kernel)));
if (kernel == (double *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
j=(ssize_t) (width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
}
/*
In debug mode, log the kernel values row by row.
*/
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
*message;
const double
*k;
ssize_t
u,
v;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
width);
message=AcquireString("");
k=kernel;
for (v=0; v < (ssize_t) width; v++)
{
*message='\0';
(void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < (ssize_t) width; u++)
{
(void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
The grayscale clone supplies the intensity values for the contrast test.
*/
luminance_image=CloneImage(image,0,0,MagickTrue,exception);
if (luminance_image == (Image *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
status=TransformImageColorspace(luminance_image,GRAYColorspace);
if (status == MagickFalse)
{
InheritException(exception,&luminance_image->exception);
kernel=(double *) RelinquishAlignedMemory(kernel);
blur_image=DestroyImage(blur_image);
luminance_image=DestroyImage(luminance_image);
return((Image *) NULL);
}
/*
Threshold blur image.
*/
status=MagickTrue;
progress=0;
/* Offset of the center pixel within the (columns+width) x width window
fetched for each row below. */
center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L));
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
luminance_view=AcquireVirtualCacheView(luminance_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
gamma;
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict l,
*magick_restrict p;
IndexPacket
*magick_restrict blur_indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* p: source neighborhood rows; l: matching grayscale rows; q: output row. */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
((width-1)/2L),image->columns+width,width,exception);
l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
(ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(l == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
contrast;
DoublePixelPacket
pixel;
MagickRealType
intensity;
const double
*magick_restrict k;
ssize_t
u;
ssize_t
j,
v;
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
k=kernel;
intensity=GetPixelIntensity(image,p+center);
gamma=0.0;
/* j is the offset of window row v; advanced by one padded row per v. */
j=0;
/* Opaque path: color channels are accumulated without alpha weighting. */
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.red+=(*k)*GetPixelRed(p+u+j);
pixel.green+=(*k)*GetPixelGreen(p+u+j);
pixel.blue+=(*k)*GetPixelBlue(p+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
if (gamma != 0.0)
{
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
/* NOTE(review): k is not reset to 'kernel' before this second pass,
so it walks past the width*width table built above -- verify
against upstream before relying on these weights. */
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.opacity+=(*k)*(p+u+j)->opacity;
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
else
{
/* Matte path: each neighbor's color is weighted by its alpha. */
MagickRealType
alpha;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j));
pixel.red+=(*k)*alpha*GetPixelRed(p+u+j);
pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j);
pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j);
pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
gamma+=(*k)*alpha;
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
if (gamma != 0.0)
{
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
k++;
}
j+=(ssize_t) (image->columns+width);
}
/* NOTE(review): unlike the opaque path, this opacity sum is not
renormalized by gamma -- confirm against upstream. */
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
alpha=(MagickRealType) (QuantumScale*
GetPixelAlpha(p+u+j));
pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
p++;
l++;
q++;
}
sync=SyncCacheViewAuthenticPixels(blur_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
luminance_view=DestroyCacheView(luminance_view);
image_view=DestroyCacheView(image_view);
luminance_image=DestroyImage(luminance_image);
kernel=(double *) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ShadeImage(): shine a distant light on an image to create a 3-D effect.
  The light direction is given by 'azimuth' (degrees off the x axis) and
  'elevation'.  When 'gray' is non-zero, each output pixel is the raw shade
  intensity; otherwise the source color is modulated by the shade.
  Returns a new image, or NULL on failure (details in 'exception').

  Fixes vs. previous revision: the gray branch now clamps the real-valued
  'shade' before assigning it to a Quantum (an out-of-range float-to-integer
  conversion is undefined behavior), and the row stride uses
  linear_image->columns consistently with the pixel fetch.
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag "Shade/Image"
  CacheView
    *image_view,
    *shade_view;
  Image
    *linear_image,
    *shade_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PrimaryInfo
    light;
  ssize_t
    y;
  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;
    PrimaryInfo
      normal;
    const PixelPacket
      *magick_restrict p,
      *magick_restrict s0,
      *magick_restrict s1,
      *magick_restrict s2;
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row window (previous, current, next) with one pixel of
      padding on each side.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      /*
        s0/s1/s2 point at the pixel above, at, and below the current
        position; each fetched row is linear_image->columns+2 pixels wide.
      */
      s0=p+1;
      s1=s0+linear_image->columns+2;
      s2=s1+linear_image->columns+2;
      /*
        Surface normal from horizontal and vertical intensity gradients.
      */
      normal.x=(double) (GetShadeIntensity(linear_image,s0-1)+
        GetShadeIntensity(linear_image,s1-1)+
        GetShadeIntensity(linear_image,s2-1)-
        GetShadeIntensity(linear_image,s0+1)-
        GetShadeIntensity(linear_image,s1+1)-
        GetShadeIntensity(linear_image,s2+1));
      normal.y=(double) (GetShadeIntensity(linear_image,s2-1)+
        GetShadeIntensity(linear_image,s2)+
        GetShadeIntensity(linear_image,s2+1)-
        GetShadeIntensity(linear_image,s0-1)-
        GetShadeIntensity(linear_image,s0)-
        GetShadeIntensity(linear_image,s0+1));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: only the vertical light component */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z*
                normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          /* Clamp before narrowing to Quantum (avoids UB on overflow). */
          SetPixelRed(q,ClampToQuantum(shade));
          SetPixelGreen(q,ClampToQuantum(shade));
          SetPixelBlue(q,ClampToQuantum(shade));
        }
      else
        {
          SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1)));
          SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1)));
          SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1)));
        }
      q->opacity=s1->opacity;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *SharpenImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Convenience wrapper: sharpen all default channels of an image by
  delegating to SharpenImageChannel().
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  return(SharpenImageChannel(image,DefaultChannels,radius,sigma,exception));
}
/*
  SharpenImageChannel(): sharpen the selected channels by convolving with a
  width x width negated-Gaussian kernel whose center tap is overwritten so
  the kernel sums to a positive value; the convolution itself is delegated
  to MorphologyImageChannel() with ConvolveMorphology.
*/
MagickExport Image *SharpenImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*sharp_image;
KernelInfo
*kernel_info;
ssize_t
i;
size_t
width;
ssize_t
j,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/* Rebuild the kernel info from scratch; only the fields set below matter. */
(void) memset(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->signature=MagickCoreSignature;
kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
if (kernel_info->values == (double *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/* Fill with negated Gaussian weights, accumulating their sum. */
normalize=0.0;
j=(ssize_t) (kernel_info->width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*
MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel_info->values[i];
i++;
}
}
/* i == width*width here, so i/2 is the center tap; overwrite it to make
the kernel sum positive, then rescale so all taps sum to 1. */
kernel_info->values[i/2]=(double) ((-2.0)*normalize);
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
sharp_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a block defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: Choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SpreadImage(): for each destination pixel, sample the source image at a
  pseudo-random displacement of up to +/- width/2 pixels and interpolate.
  Returns a new image, or NULL on failure (details in 'exception').
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"
CacheView
*image_view,
*spread_view;
Image
*spread_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
RandomInfo
**magick_restrict random_info;
size_t
width;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize spread image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
spread_image=CloneImage(image,0,0,MagickTrue,exception);
if (spread_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
{
InheritException(exception,&spread_image->exception);
spread_image=DestroyImage(spread_image);
return((Image *) NULL);
}
/*
Spread image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(spread_image,&bias);
width=GetOptimalKernelWidth1D(radius,0.5);
/* One RNG per thread so rows can be processed in parallel. */
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,spread_image,spread_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) spread_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickPixelPacket
pixel;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(spread_view);
pixel=bias;
for (x=0; x < (ssize_t) spread_image->columns; x++)
{
PointInfo
point;
point.x=GetPseudoRandomValue(random_info[id]);
point.y=GetPseudoRandomValue(random_info[id]);
/* Sample at a random offset of up to +/- width/2 around (x,y). */
status=InterpolateMagickPixelPacket(image,image_view,image->interpolate,
(double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),&pixel,
exception);
if (status == MagickFalse)
break;
SetPixelPacket(spread_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
spread_view=DestroyCacheView(spread_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
spread_image=DestroyImage(spread_image);
return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double amount,const double threshold,
% ExceptionInfo *exception)
% Image *UnsharpMaskImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double gain,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Convenience wrapper: unsharp-mask all default channels of an image by
  delegating to UnsharpMaskImageChannel().
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
  return(UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain,
    threshold,exception));
}
/*
  UnsharpMaskImageChannel(): sharpen by blurring a copy of the image and,
  wherever the (original - blurred) difference exceeds 'threshold' (scaled
  to the quantum range), adding back 'gain' times that difference.
*/
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double gain,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"
CacheView
*image_view,
*unsharp_view;
Image
*unsharp_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
quantum_threshold;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
/* This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain,
threshold,exception);
if (unsharp_image != (Image *) NULL)
return(unsharp_image);
#endif
*/
/* The blurred clone doubles as the output image; it is sharpened in
place below. */
unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels),
radius,sigma,exception);
if (unsharp_image == (Image *) NULL)
return((Image *) NULL);
quantum_threshold=(MagickRealType) QuantumRange*threshold;
/*
Unsharp-mask image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,unsharp_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
DoublePixelPacket
pixel;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict unsharp_indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Per channel: difference = original - blurred; below the threshold
keep the original, otherwise add gain*difference. */
if ((channel & RedChannel) != 0)
{
pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
if (fabs(2.0*pixel.red) < quantum_threshold)
pixel.red=(MagickRealType) GetPixelRed(p);
else
pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain);
SetPixelRed(q,ClampToQuantum(pixel.red));
}
if ((channel & GreenChannel) != 0)
{
pixel.green=GetPixelGreen(p)-(MagickRealType) q->green;
if (fabs(2.0*pixel.green) < quantum_threshold)
pixel.green=(MagickRealType) GetPixelGreen(p);
else
pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain);
SetPixelGreen(q,ClampToQuantum(pixel.green));
}
if ((channel & BlueChannel) != 0)
{
pixel.blue=GetPixelBlue(p)-(MagickRealType) q->blue;
if (fabs(2.0*pixel.blue) < quantum_threshold)
pixel.blue=(MagickRealType) GetPixelBlue(p);
else
pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain);
SetPixelBlue(q,ClampToQuantum(pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
pixel.opacity=GetPixelOpacity(p)-(MagickRealType) q->opacity;
if (fabs(2.0*pixel.opacity) < quantum_threshold)
pixel.opacity=(MagickRealType) GetPixelOpacity(p);
else
pixel.opacity=GetPixelOpacity(p)+(pixel.opacity*gain);
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
GetPixelIndex(unsharp_indexes+x);
if (fabs(2.0*pixel.index) < quantum_threshold)
pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
else
pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
(pixel.index*gain);
SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
unsharp_image->type=image->type;
unsharp_view=DestroyCacheView(unsharp_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unsharp_image=DestroyImage(unsharp_image);
return(unsharp_image);
}
|
endif.c | /*
* test the handling of #endif at the end of a parallel region
* Extracted from BOTS
* Liao 1/15/2009
* */
#include <stdio.h>
#define FORCE_TIED_TASKS
/*
  Reduced compiler test case (extracted from BOTS): exercises a #pragma and
  an #endif appearing at the very end of an OpenMP parallel region.  The
  'size' parameter is intentionally unused in this reduction.
*/
void find_queens (int size)
{
int total_count=0;
/* FORCE_TIED_TASKS is defined above, so each thread atomically bumps the
shared counter once. */
#pragma omp parallel
{
#ifdef FORCE_TIED_TASKS
#pragma omp atomic
total_count += 1;
#endif
// printf("aa");
}
}
|
util.h | #ifndef _C_UTIL_
#define _C_UTIL_
#include <math.h>
#include <iostream>
#include <omp.h>
#include <sys/time.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
using std::endl;
// Return the current wall-clock time in seconds (microsecond resolution).
double gettime() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec + now.tv_usec * 1e-6;
}
//-------------------------------------------------------------------
//--initialize array with maximum limit
//-------------------------------------------------------------------
// Fill A[0..n) with pseudo-random values scaled into [0, maxi).
// Uses rand() with no seeding here; callers wanting reproducibility must
// call srand() themselves.
template<typename datatype>
void fill(datatype *A, const int n, const datatype maxi){
for (int j = 0; j < n; j++){
A[j] = ((datatype) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
//--print matrix
// Print a height x width matrix (row-major in A) to stdout:
// elements space-separated, one row per line.
template<typename datatype>
void print_matrix(datatype *A, int height, int width){
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col)
      std::cout << A[row * width + col] << " ";
    std::cout << std::endl;
  }
  return;
}
//-------------------------------------------------------------------
//--verify results
//-------------------------------------------------------------------
#define MAX_RELATIVE_ERROR .002
// Compare CPU reference results against GPU results element-wise; the
// check passes when every element's relative error is within
// MAX_RELATIVE_ERROR.  Prints a pass/fail banner to stdout.
template<typename datatype>
void verify_array(const datatype *cpuResults, const datatype *gpuResults, const int size){
  bool passed = true;
  // reduction(&&:passed) combines per-thread results safely; the previous
  // unsynchronized writes to a shared 'passed' flag were a data race.
  #pragma omp parallel for reduction(&&:passed)
  for (int i=0; i<size; i++){
    // NOTE(review): the relative error divides by cpuResults[i]; a zero
    // reference value yields inf/NaN -- confirm inputs are nonzero or add
    // an absolute-error fallback.
    if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR){
      passed = false;
    }
  }
  if (passed){
    std::cout << "--cambine:passed:-)" << std::endl;
  }
  else{
    std::cout << "--cambine: failed:-(" << std::endl;
  }
  return ;
}
// Element-wise exact comparison of CPU vs GPU results; prints a pass/fail
// banner to stdout.  Scans every element (no early exit).
template<typename datatype>
void compare_results(const datatype *cpu_results, const datatype *gpu_results, const int size){
  bool identical = true;
  for (int idx = 0; idx < size; ++idx)
    if (cpu_results[idx] != gpu_results[idx])
      identical = false;
  if (identical)
    std::cout << "--cambine:passed:-)" << std::endl;
  else
    std::cout << "--cambine: failed:-(" << std::endl;
  return ;
}
#endif
|
GB_binop__div_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_int32
// A.*B function (eWiseMult): GB_AemultB__div_int32
// A*D function (colscale): GB_AxD__div_int32
// D*A function (rowscale): GB_DxB__div_int32
// C+=B function (dense accum): GB_Cdense_accumB__div_int32
// C+=b function (dense accum): GB_Cdense_accumb__div_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_int32
// C=scalar+B GB_bind1st__div_int32
// C=scalar+B' GB_bind1st_tran__div_int32
// C=A+scalar GB_bind2nd__div_int32
// C=A'+scalar GB_bind2nd_tran__div_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IDIV_SIGNED (x, y, 32) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT32 || GxB_NO_DIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; applies the DIV (int32) operator
// entrywise via the included template. No mask, no typecasting.
// NOTE(review): this file is auto-generated (see header) -- code untouched.
void GB_Cdense_ewise3_accum__div_int32
(
GrB_Matrix C, // input/output: dense matrix accumulated into
const GrB_Matrix A, // first dense input
const GrB_Matrix B, // second dense input
const int nthreads // number of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; DIV (int32) applied entrywise.
// Returns GrB_NO_VALUE if this operator/type pair is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_ewise3_noaccum__div_int32
(
GrB_Matrix C, // output: dense matrix
const GrB_Matrix A, // first dense input
const GrB_Matrix B, // second dense input
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the DIV
// (int32) operator. The *_slice arrays are per-task slice descriptors
// produced by GB_ek_slice (see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__div_int32
(
GrB_Matrix C, // input/output: dense matrix
const GrB_Matrix B, // sparse input accumulated into C
const int64_t *GB_RESTRICT kfirst_slice, // per-task slice start (vectors of B)
const int64_t *GB_RESTRICT klast_slice, // per-task slice end (vectors of B)
const int64_t *GB_RESTRICT pstart_slice, // per-task slice entry offsets
const int ntasks, // number of parallel tasks
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with DIV (int32).
GrB_Info GB_Cdense_accumb__div_int32
(
GrB_Matrix C, // input/output: dense matrix
const GB_void *p_bwork, // pointer to the scalar b, of type int32_t
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first.
// Left as-is because this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, combining
// entries with DIV (int32). Slice arrays come from GB_ek_slice.
GrB_Info GB_AxD__div_int32
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, bool A_is_pattern, // input; pattern-only if flag set
const GrB_Matrix D, bool D_is_pattern, // diagonal scaling matrix
const int64_t *GB_RESTRICT kfirst_slice, // per-task slice start
const int64_t *GB_RESTRICT klast_slice, // per-task slice end
const int64_t *GB_RESTRICT pstart_slice, // per-task slice entry offsets
const int ntasks, // number of parallel tasks
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, combining
// entries with DIV (int32).
GrB_Info GB_DxB__div_int32
(
GrB_Matrix C, // output matrix
const GrB_Matrix D, bool D_is_pattern, // diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern, // input; pattern-only if flag set
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, applying DIV (int32) where entries of A
// and B overlap (GraphBLAS eWiseAdd set-union semantics).
GrB_Info GB_AaddB__div_int32
(
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use M structurally (values ignored)
const GrB_Matrix A, // first input
const GrB_Matrix B, // second input
const bool Ch_is_Mh, // true if C's hyperlist equals M's hyperlist
const int64_t *GB_RESTRICT C_to_M, // mapping from C's vectors to M's
const int64_t *GB_RESTRICT C_to_A, // mapping from C's vectors to A's
const int64_t *GB_RESTRICT C_to_B, // mapping from C's vectors to B's
const GB_task_struct *GB_RESTRICT TaskList, // parallel task descriptors
const int ntasks, // number of parallel tasks
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying DIV (int32) on the
// intersection of the patterns of A and B.
GrB_Info GB_AemultB__div_int32
(
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct, // if true, use M structurally (values ignored)
const GrB_Matrix A, // first input
const GrB_Matrix B, // second input
const int64_t *GB_RESTRICT C_to_M, // mapping from C's vectors to M's
const int64_t *GB_RESTRICT C_to_A, // mapping from C's vectors to A's
const int64_t *GB_RESTRICT C_to_B, // mapping from C's vectors to B's
const GB_task_struct *GB_RESTRICT TaskList, // parallel task descriptors
const int ntasks, // number of parallel tasks
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]) for all p: apply DIV (int32) with the scalar bound
// to the FIRST argument. In-place operation is safe because each output
// element depends only on the same-index input element.
GrB_Info GB_bind1st__div_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input, // scalar x, of type int32_t
const GB_void *Bx_input, // dense array of int32_t values
int64_t anz, // number of entries
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
// GB_IDIV_SIGNED: GraphBLAS signed integer division (defined in GB.h);
// presumably guards div-by-zero and INT32_MIN/-1 -- see GB.h to confirm
Cx [p] = GB_IDIV_SIGNED (x, bij, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y) for all p: apply DIV (int32) with the scalar bound
// to the SECOND argument. In-place operation is safe (same-index mapping).
GrB_Info GB_bind2nd__div_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input, // dense array of int32_t values
const GB_void *y_input, // scalar y, of type int32_t
int64_t anz, // number of entries
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
// GB_IDIV_SIGNED: GraphBLAS signed integer division macro from GB.h
Cx [p] = GB_IDIV_SIGNED (aij, y, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 32) ; \
}
// C = op (x, A'): transpose A and apply DIV (int32) with the scalar bound to
// the first argument; the per-entry work is done by GB_CAST_OP (defined just
// above) inside GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__div_int32
(
GrB_Matrix C, // output matrix
const GB_void *x_input, // scalar x, of type int32_t
const GrB_Matrix A, // input matrix, transposed during the operation
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts workspace
GBI_single_iterator Iter, // iterator over A
const int64_t *GB_RESTRICT A_slice, // slice boundaries of A
int naslice // number of slices
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (preprocessor runs regardless
// of the unreachable position of these lines)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 32) ; \
}
// C = op (A', y): transpose A and apply DIV (int32) with the scalar bound to
// the second argument; per-entry work via GB_CAST_OP (defined just above).
GrB_Info GB_bind2nd_tran__div_int32
(
GrB_Matrix C, // output matrix
const GrB_Matrix A, // input matrix, transposed during the operation
const GB_void *y_input, // scalar y, of type int32_t
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts workspace
GBI_single_iterator Iter, // iterator over A
const int64_t *GB_RESTRICT A_slice, // slice boundaries of A
int naslice // number of slices
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
truecrypt_fmt_plug.c | /* TrueCrypt volume support to John The Ripper
*
* Written by Alain Espinosa <alainesp at gmail.com> in 2012. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2012 Alain Espinosa and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
* Updated in Dec, 2014 by JimF. This is a ugly format, and was converted
* into a more standard (using crypt_all) format. The PKCS5_PBKDF2_HMAC can
* be replaced with faster pbkdf2_xxxx functions (possibly with SIMD usage).
* this has been done for sha512. ripemd160 and Whirlpool pbkdf2 header
* files have been created. Also, proper decrypt is now done, (in cmp_exact)
* and we test against the 'TRUE' signature, and against 2 crc32's which
* are computed over the 448 bytes of decrypted data. So we now have a
* full 96 bits of hash. There will be no way we get false positives from
* this slow format. EVP_AES_XTS removed. Also, we now only pbkdf2 over
* 64 bytes of data (all that is needed for the 2 AES keys), and that sped
* up the crypts A LOT (~3x faster)
*
*/
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_truecrypt;
extern struct fmt_main fmt_truecrypt_ripemd160;
extern struct fmt_main fmt_truecrypt_sha512;
extern struct fmt_main fmt_truecrypt_whirlpool;
#elif FMT_REGISTERS_H
john_register_one(&fmt_truecrypt);
john_register_one(&fmt_truecrypt_ripemd160);
john_register_one(&fmt_truecrypt_sha512);
john_register_one(&fmt_truecrypt_whirlpool);
#else
#include "aes_xts.h"
#include <string.h>
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#include "johnswap.h"
#define PBKDF2_HMAC_SHA512_ALSO_INCLUDE_CTX
#include "pbkdf2_hmac_sha512.h"
#include "pbkdf2_hmac_ripemd160.h"
#include "pbkdf2_hmac_whirlpool.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 4
#else
#define OMP_SCALE 1
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
/* 64 is the actual maximum used by Truecrypt software as of version 7.1a */
#define PLAINTEXT_LENGTH 64
#define MAX_CIPHERTEXT_LENGTH (512*2+32)
#define SALT_SIZE sizeof(struct cust_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static unsigned char (*key_buffer)[PLAINTEXT_LENGTH + 1];
static unsigned char (*first_block_dec)[16];
#define TAG_WHIRLPOOL "truecrypt_WHIRLPOOL$"
#define TAG_SHA512 "truecrypt_SHA_512$"
#define TAG_RIPEMD160 "truecrypt_RIPEMD_160$"
#define TAG_WHIRLPOOL_LEN (sizeof(TAG_WHIRLPOOL)-1)
#define TAG_SHA512_LEN (sizeof(TAG_SHA512)-1)
#define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1)
#define IS_SHA512 1
#define IS_RIPEMD160 2
#define IS_WHIRLPOOL 3
// borrowed from https://github.com/bwalex/tc-play
#define MAX_PASSSZ 64
#define PASS_BUFSZ 256
#define KPOOL_SZ 64
#define MAX_KFILE_SZ 1048576 /* 1 MB */
#define MAX_KEYFILES 256
// keyfile(s) data
unsigned char (*keyfiles_data)[MAX_KFILE_SZ];
int (*keyfiles_length);
// Per-hash "salt" record. As the comment below explains, the ENTIRE input
// hash (salt + encrypted header) acts as the salt for dupe-removal purposes.
struct cust_salt {
unsigned char salt[64]; // 64-byte TrueCrypt header salt (first 64 bytes of the hash)
// I 'thought' that bin[] could be removed, so that only salt[] was used
// for salt dupe-removal. That was wrong, bin[] must also be part of the
// salt dupe logic, or we will get wrong passwords found, if there is
// hashes with the same salts. bin[] array really is part of the salt
// since we decrypt it, to do the final check. So there is no real way
// to have any duplicate salts. in essense, we have a 'fixed' binary
// and the salt is the entire input hash. The fixed binary can be
// thought of as 'TRUE' (but it is more than this). It is simply we
// do not know the real binary until after we correctly decrypt.
// Initially I moved bin[] and ported to dyna_salt. All hashes in a
// test suite cracked, BUT the same password was used for all of them,
// the first password in the file. Not what we wanted.
unsigned char bin[512-64]; // remaining 448 bytes: the encrypted volume header
int loop_inc; // crypt_all step (SSE_GROUP_SZ_SHA512 for SIMD SHA-512, else 1)
int num_iterations; // PBKDF2 iterations (1000; 2000 for RIPEMD-160)
int hash_type; // IS_SHA512 / IS_RIPEMD160 / IS_WHIRLPOOL
int nkeyfiles; // number of keyfiles appended to the hash line (0 if none)
} *psalt;
static struct fmt_tests tests_ripemd160[] = {
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
{"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
{NULL}
};
static struct fmt_tests tests_sha512[] = {
{"truecrypt_SHA_512$aa582afe64197a3cfd4faf7697673e5e14414369da3f716400414f63f75447da7d3abdc65a25ea511b1772d67370d6c349d8000de66d65861403093fecfb85719e1d46158d24324e5a2c0ee598214b1b2e7eac761dbde8cb85bcb33f293df7f30c9e44a3fa97bf1c70e9986677855873fa2435d9154ccaed8f28d68f16b10adcce7032d7c1742d322739d02c05457859abdaa176faa95c674d2a1092c30832dd2afd9a319599b4d1db92ffe6e48b3b29e566d5c51af091839699f5ad1715730fef24e94e39a6f40770b8320e30bf972d810b588af88ce3450337adbec0a10255b20230bcfca93aa5a0a6592cd6038312181c0792c59ec9e5d95a6216497d39ae28131869b89368e82371718970bf9750a7114c83d87b1b0cd16b6e8d41c4925d15ec26107e92847ec1bb73363ca10f3ad62afa8b0f95ff13cdbe217a1e8a74508ef439ed2140b26d5538b8d011a0d1e469f2a6962e56964adc75b90d9c6a16e88ad0adb59a337f8abb3f9d76f7f9acad22853e9dbbce13a4f686c6a802243b0901972af3c6928511609ac7b957b352452c4347acd563a72faa86a46522942fdc57f32d48c5148a2bb0bc2c3dbc9851385f816f2ece958957082c0a8fe69f647be675d87fcb8244912abc277a3242ee17e1d522f85598417559cb3a9f60b755e5b613069cb54c05a4c5d2fbd3ca6ba793320aeb0e109f8b21852daf2d9ed74dd9", "password"},
{"truecrypt_SHA_512$73f6b08614dc4ffbd77d27a0815b0700d6b612f573ccd6c8937e8d154321e3c1c1c67dd348d4d3bc8304e94a3a6ec0c672de8396a9a6b26b12393195b7daa4225a9d3a134229be011f8179791bb00c31b5c132c8dbad5a6f8738487477c409b3c32d90b07be8d7a3a9faa95d37ab6faccc459d47f029e25adcea48cee83eaa35b7acc3f849717000421d92ac46e6f16ec3dccacd3ffae76a48280977d2a6727027d9d6ff9c4c98405359ee382f6dd1eca0d7007cbe804b81485c1085e74b58d3eb1e3c7ebdc1e1ab1384e4440ab6ca7beed7e0ef7d1e0da5ffc3cd89f7b6ac8a9257ee369d397ac1e112f75382ddbe6f7317ec20c46cb7b2111d0d91570e90b4c01a0b8205fcdf4d0cadcf4a067b8f285a541f1d649894fb3ade29a2ee0575524455d489c299dde215bea3254f7d43aa4e4011a39bdb6e7473bc29f588e659fdbf065cc4a336ba42f2b6c07479cf3e544978150fb013da7db22afcb4f8384e39e2edfa30a4cbe5e84a07c54ba66663bb9284836cc5a8ba7489d3f7f92aec6d9f4e264c90c2af6181082bd273197bc42c325cb1de31006dd55425e3f210d2ddd7973978eec865d3226bb1e30a9897146d90d79a73070e87f0182981ea85f15f948ae1958af7704fabecd6f07e20be70be9f9c38a5c5e5c8b17be648f011b2c40f62d6ac51de932add5bdb47bb428fd510b004a7aa79321b03ed7aa202be439fbf", "password" },
{"truecrypt_SHA_512$cfd9e5757da139b32d117cd60f86f649400615dc218981106dfadd44598599a7ec0ace42de61506fe8d81b5c885861cdb26e0c38cb9adfcff27ba88872220ccd0914d4fa44bab5a708fe6864e0f665ac71d87e7e97b3724d610cf1f6ec09fa99da40126f63868654fed3381eaa8176f689e8e292c3cb68e43601d5804bc2e19d86722c21d42204e158b26b720e7b8f7580edce15469195dd7ed711b0fcb6c8abc253d0fd93cc784d5279de527fbdcfb357780635a5c363b773b55957d7efb472f6e6012489a9f0d225573446e5251cfb277a1365eed787e0da52f02d835667d74cc41fa4002cc35ad1ce276fbf9d73d6553ac0f8ab6961901d292a66df814a2cbda1b41f29aeec88ed15e7d37fe84ac5306b5a1b8d2e1f2c132e5c7d40ca7bb76d4ff87980ca4d75eaac5066b3ed50b53259554b9f922f7cee8e91847359d06e448da02cbeeecc78ca9bee2899a33dfa04a478ca131d33c64d6de5f81b219f11bed6ff3c0d56f26b3a27c79e7c55b6f76567a612166ce71028e3d3ae7e5abd25faec5e2e9dc30719baa2c138e26d6f8e3799a72b5e7b1c2a07c12cea452073b72f6e429bb17dd23fe3934c9e406bb4060083f92aa100c2e82ca40664f65c02cbc800c5696659f8df84db17edb92de5d4f1ca9e5fe71844e1e8c4f8b19ce7362fb3ca5467bf65122067c53f011648a6663894b315e6c5c635bec5bd39da028041", "123" },
/* test vector with single keyfile, with data "1234567" */
{NULL}
};
static struct fmt_tests tests_whirlpool[] = {
{"truecrypt_WHIRLPOOL$5724ba89229d705010ec56af416b16155682a0cab9cf48ac5a5fdd2086c9a251ae4bbea6cfb8464321a789852f7812095b0e0c4c4f9c6d14ba7beedaf3484b375ac7bc97b43c3e74bf1a0c259b7ac8725d990d2ff31935ca3443f2ce8df59de86515da3e0f53f728882b71c5cc704df0c87c282a7413db446e9a2e516a144311dd25092eb0a2c5df0240d899708289fc7141abd8538fa5791d9f96c39129cce9fe8a6e58e84364e2f4acc32274147431cb2d2480b1b54bffee485acee0925852b8a6ee71d275f028b92e540be595448e5f1d78560a3b8ad209962dd5981d7ca98db9a678a588a9296157d44502cd78f9e32f022dddc9bc8111b5704ee39a9b56d30b89898ae340e90f2e6c73be6ac64de97e32fc2eed0b66dcd5c1553eeab3950cf851624a5a4439435a6fd5717fda6d5f939f4a902321341964c16bda8975752ba150fb9d858d8eaff2a2086cb50d30abff741ee20223b4223b1783f0ed537a609a081afed952395ef0b5de6883db66cbb5a8bac70f2f757c7b6e6bb5d863672820f0d3d61b262b2b6c2ca0dc8e7137851aa450da1c1d915e005bff0e849a89bf67693ef97f5c17bf8d07a18c562dc783274f9ec580f9519a6dd1429b66160ddb04549506ad616dd0695da144fa2ad270eac7163983e9036f1bde3c7634b8a246b8dcd518ce3e12b881c838fbce59a0cfdffa3b21447e3f28124f63549c3962", "password" },
{"truecrypt_WHIRLPOOL$0650595770851981d70b088ff6ef4bf90573e08d03c8cac8b2dfded22e1653f5c45103758c68be344fdccae42b4683087da083a3841b92fb79856798eaee793c04cd95ae556d9616684da17e47bd2f775d8128f94b80b781e4cab4921b12c620721cf719ca72d3997cea829fd29b429282b597d5719c13423cdf7bd717fa12a56b8eddcf7b1ad2796c4ad078ab3a9bd944a694aa4b0078ed160440dd3db13dd1d04a7aaaa4dc016a95bd1cfafcd833ae933c627bf5512ae55c76069af7190823dba0133d6fe02e4421d3684ff2a2493da990a3cc5eed40a9e8c48c7a89a2f47030d45c324a3d78b941e772e24b285af6739ae1f5953ff838edaa69e79939f55d0fe00cd0e3a20a46db3a232009eabc800711342f7e580ba909f16c2039d4900fd4025845a385641a6037ceb6420fe7d37868e8c06e6146eddec9e6cb97e71048da5fa5898dac08152516ea1c6729e85d31596cd226aa218ce693989efb9fa8b05404bcc2debbc75c429a03fe31bfc49f10d595b898436ff6b02fc01d745b91280f26ae94a4969ce7f86c12e6b562c7b5377e3fb3247a8cda11a930c2a9e80f24966925de01afad5987ebee9c3de1d41667c6dc35cebbbc963f263c700d06a647ab7020385e3a7e30406f3e7a9b3142d39e0439c98948134d11166b621dfd3ea9d3a84d985b2aa7732b7ad9beba44334dd86292b0c94befb2cb8aa72a823129cb", "123" },
{NULL}
};
static struct fmt_tests tests_all[] = {
{"truecrypt_SHA_512$aa582afe64197a3cfd4faf7697673e5e14414369da3f716400414f63f75447da7d3abdc65a25ea511b1772d67370d6c349d8000de66d65861403093fecfb85719e1d46158d24324e5a2c0ee598214b1b2e7eac761dbde8cb85bcb33f293df7f30c9e44a3fa97bf1c70e9986677855873fa2435d9154ccaed8f28d68f16b10adcce7032d7c1742d322739d02c05457859abdaa176faa95c674d2a1092c30832dd2afd9a319599b4d1db92ffe6e48b3b29e566d5c51af091839699f5ad1715730fef24e94e39a6f40770b8320e30bf972d810b588af88ce3450337adbec0a10255b20230bcfca93aa5a0a6592cd6038312181c0792c59ec9e5d95a6216497d39ae28131869b89368e82371718970bf9750a7114c83d87b1b0cd16b6e8d41c4925d15ec26107e92847ec1bb73363ca10f3ad62afa8b0f95ff13cdbe217a1e8a74508ef439ed2140b26d5538b8d011a0d1e469f2a6962e56964adc75b90d9c6a16e88ad0adb59a337f8abb3f9d76f7f9acad22853e9dbbce13a4f686c6a802243b0901972af3c6928511609ac7b957b352452c4347acd563a72faa86a46522942fdc57f32d48c5148a2bb0bc2c3dbc9851385f816f2ece958957082c0a8fe69f647be675d87fcb8244912abc277a3242ee17e1d522f85598417559cb3a9f60b755e5b613069cb54c05a4c5d2fbd3ca6ba793320aeb0e109f8b21852daf2d9ed74dd9", "password"},
{"truecrypt_SHA_512$73f6b08614dc4ffbd77d27a0815b0700d6b612f573ccd6c8937e8d154321e3c1c1c67dd348d4d3bc8304e94a3a6ec0c672de8396a9a6b26b12393195b7daa4225a9d3a134229be011f8179791bb00c31b5c132c8dbad5a6f8738487477c409b3c32d90b07be8d7a3a9faa95d37ab6faccc459d47f029e25adcea48cee83eaa35b7acc3f849717000421d92ac46e6f16ec3dccacd3ffae76a48280977d2a6727027d9d6ff9c4c98405359ee382f6dd1eca0d7007cbe804b81485c1085e74b58d3eb1e3c7ebdc1e1ab1384e4440ab6ca7beed7e0ef7d1e0da5ffc3cd89f7b6ac8a9257ee369d397ac1e112f75382ddbe6f7317ec20c46cb7b2111d0d91570e90b4c01a0b8205fcdf4d0cadcf4a067b8f285a541f1d649894fb3ade29a2ee0575524455d489c299dde215bea3254f7d43aa4e4011a39bdb6e7473bc29f588e659fdbf065cc4a336ba42f2b6c07479cf3e544978150fb013da7db22afcb4f8384e39e2edfa30a4cbe5e84a07c54ba66663bb9284836cc5a8ba7489d3f7f92aec6d9f4e264c90c2af6181082bd273197bc42c325cb1de31006dd55425e3f210d2ddd7973978eec865d3226bb1e30a9897146d90d79a73070e87f0182981ea85f15f948ae1958af7704fabecd6f07e20be70be9f9c38a5c5e5c8b17be648f011b2c40f62d6ac51de932add5bdb47bb428fd510b004a7aa79321b03ed7aa202be439fbf", "password" },
{TAG_SHA512"cfd9e5757da139b32d117cd60f86f649400615dc218981106dfadd44598599a7ec0ace42de61506fe8d81b5c885861cdb26e0c38cb9adfcff27ba88872220ccd0914d4fa44bab5a708fe6864e0f665ac71d87e7e97b3724d610cf1f6ec09fa99da40126f63868654fed3381eaa8176f689e8e292c3cb68e43601d5804bc2e19d86722c21d42204e158b26b720e7b8f7580edce15469195dd7ed711b0fcb6c8abc253d0fd93cc784d5279de527fbdcfb357780635a5c363b773b55957d7efb472f6e6012489a9f0d225573446e5251cfb277a1365eed787e0da52f02d835667d74cc41fa4002cc35ad1ce276fbf9d73d6553ac0f8ab6961901d292a66df814a2cbda1b41f29aeec88ed15e7d37fe84ac5306b5a1b8d2e1f2c132e5c7d40ca7bb76d4ff87980ca4d75eaac5066b3ed50b53259554b9f922f7cee8e91847359d06e448da02cbeeecc78ca9bee2899a33dfa04a478ca131d33c64d6de5f81b219f11bed6ff3c0d56f26b3a27c79e7c55b6f76567a612166ce71028e3d3ae7e5abd25faec5e2e9dc30719baa2c138e26d6f8e3799a72b5e7b1c2a07c12cea452073b72f6e429bb17dd23fe3934c9e406bb4060083f92aa100c2e82ca40664f65c02cbc800c5696659f8df84db17edb92de5d4f1ca9e5fe71844e1e8c4f8b19ce7362fb3ca5467bf65122067c53f011648a6663894b315e6c5c635bec5bd39da028041", "123" },
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
{TAG_RIPEMD160"6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
{"truecrypt_WHIRLPOOL$5724ba89229d705010ec56af416b16155682a0cab9cf48ac5a5fdd2086c9a251ae4bbea6cfb8464321a789852f7812095b0e0c4c4f9c6d14ba7beedaf3484b375ac7bc97b43c3e74bf1a0c259b7ac8725d990d2ff31935ca3443f2ce8df59de86515da3e0f53f728882b71c5cc704df0c87c282a7413db446e9a2e516a144311dd25092eb0a2c5df0240d899708289fc7141abd8538fa5791d9f96c39129cce9fe8a6e58e84364e2f4acc32274147431cb2d2480b1b54bffee485acee0925852b8a6ee71d275f028b92e540be595448e5f1d78560a3b8ad209962dd5981d7ca98db9a678a588a9296157d44502cd78f9e32f022dddc9bc8111b5704ee39a9b56d30b89898ae340e90f2e6c73be6ac64de97e32fc2eed0b66dcd5c1553eeab3950cf851624a5a4439435a6fd5717fda6d5f939f4a902321341964c16bda8975752ba150fb9d858d8eaff2a2086cb50d30abff741ee20223b4223b1783f0ed537a609a081afed952395ef0b5de6883db66cbb5a8bac70f2f757c7b6e6bb5d863672820f0d3d61b262b2b6c2ca0dc8e7137851aa450da1c1d915e005bff0e849a89bf67693ef97f5c17bf8d07a18c562dc783274f9ec580f9519a6dd1429b66160ddb04549506ad616dd0695da144fa2ad270eac7163983e9036f1bde3c7634b8a246b8dcd518ce3e12b881c838fbce59a0cfdffa3b21447e3f28124f63549c3962", "password" },
{TAG_WHIRLPOOL"0650595770851981d70b088ff6ef4bf90573e08d03c8cac8b2dfded22e1653f5c45103758c68be344fdccae42b4683087da083a3841b92fb79856798eaee793c04cd95ae556d9616684da17e47bd2f775d8128f94b80b781e4cab4921b12c620721cf719ca72d3997cea829fd29b429282b597d5719c13423cdf7bd717fa12a56b8eddcf7b1ad2796c4ad078ab3a9bd944a694aa4b0078ed160440dd3db13dd1d04a7aaaa4dc016a95bd1cfafcd833ae933c627bf5512ae55c76069af7190823dba0133d6fe02e4421d3684ff2a2493da990a3cc5eed40a9e8c48c7a89a2f47030d45c324a3d78b941e772e24b285af6739ae1f5953ff838edaa69e79939f55d0fe00cd0e3a20a46db3a232009eabc800711342f7e580ba909f16c2039d4900fd4025845a385641a6037ceb6420fe7d37868e8c06e6146eddec9e6cb97e71048da5fa5898dac08152516ea1c6729e85d31596cd226aa218ce693989efb9fa8b05404bcc2debbc75c429a03fe31bfc49f10d595b898436ff6b02fc01d745b91280f26ae94a4969ce7f86c12e6b562c7b5377e3fb3247a8cda11a930c2a9e80f24966925de01afad5987ebee9c3de1d41667c6dc35cebbbc963f263c700d06a647ab7020385e3a7e30406f3e7a9b3142d39e0439c98948134d11166b621dfd3ea9d3a84d985b2aa7732b7ad9beba44334dd86292b0c94befb2cb8aa72a823129cb", "123" },
{NULL}
};
/*
 * init: one-time format setup. Scales the keys-per-crypt limits by the
 * OpenMP thread count (max additionally by OMP_SCALE), then allocates the
 * per-key plaintext buffers, the first-decrypted-block buffers, and the
 * keyfile data/length pools. Freed again in done().
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	key_buffer = mem_calloc(self->params.max_keys_per_crypt,
	                        sizeof(*key_buffer));
	first_block_dec = mem_calloc(self->params.max_keys_per_crypt,
	                             sizeof(*first_block_dec));
	keyfiles_data = mem_calloc(MAX_KEYFILES, sizeof(*keyfiles_data));
	keyfiles_length = mem_calloc(MAX_KEYFILES, sizeof(int));
}
/* done: release everything allocated by init(). */
static void done(void)
{
	MEM_FREE(keyfiles_length);
	MEM_FREE(keyfiles_data);
	MEM_FREE(key_buffer);
	MEM_FREE(first_block_dec);
}
/*
 * Shared hash-line validator. `pos` is the offset just past the format tag.
 * Accepts exactly 512 hex-encoded bytes (1024 hex digits), optionally
 * followed by "$<nkeyfiles>$file1[$file2...]". Returns 1 if valid, 0 if not.
 */
static int valid(char* ciphertext, int pos)
{
	char *body = ciphertext + pos;
	char *sep = strchr(body, '$');
	unsigned int i;

	if (!sep) {
		/* no keyfiles: the hex block must be the whole remainder */
		if (pos + 512*2 != strlen(ciphertext))
			return 0;
	} else {
		/* keyfile list present: hex block must be exactly 1024 chars */
		if (sep - body != 512 * 2)
			return 0;
		/* check keyfile(s) count */
		int nfiles = atoi(sep + 1);
		if (nfiles > MAX_KEYFILES || nfiles < 1)
			return 0;
	}
	/* every one of the 1024 digits must be valid hexadecimal */
	for (i = 0; i < 512*2; i++)
		if (atoi16l[ARCH_INDEX(body[i])] == 0x7F)
			return 0;
	return 1;
}
/* Accept only hashes carrying the RIPEMD-160 tag, then check the layout. */
static int valid_ripemd160(char* ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN) != 0)
		return 0;
	return valid(ciphertext, TAG_RIPEMD160_LEN);
}
/* Accept only hashes carrying the SHA-512 tag, then check the layout. */
static int valid_sha512(char* ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, TAG_SHA512, TAG_SHA512_LEN) != 0)
		return 0;
	return valid(ciphertext, TAG_SHA512_LEN);
}
/* Accept only hashes carrying the Whirlpool tag, then check the layout. */
static int valid_whirlpool(char* ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, TAG_WHIRLPOOL, TAG_WHIRLPOOL_LEN) != 0)
		return 0;
	return valid(ciphertext, TAG_WHIRLPOOL_LEN);
}
/* The generic truecrypt format accepts any of the three hash-specific tags. */
static int valid_truecrypt(char *ciphertext, struct fmt_main *self) {
	return valid_sha512(ciphertext, self) ||
	       valid_ripemd160(ciphertext, self) ||
	       valid_whirlpool(ciphertext, self);
}
// Install the salt for subsequent crypt_all() calls; `salt` points at a
// struct cust_salt produced by get_salt().
static void set_salt(void *salt)
{
psalt = salt;
}
static void* get_salt(char *ciphertext)
{
static char buf[sizeof(struct cust_salt)+4];
struct cust_salt *s = (struct cust_salt *)mem_align(buf, 4);
unsigned int i;
char tpath[PATH_BUFFER_SIZE] = {0};
char *p, *q;
int idx;
FILE *fp;
size_t sz;
memset(s, 0, sizeof(struct cust_salt));
s->num_iterations = 1000;
s->loop_inc = 1;
if (!strncmp(ciphertext, TAG_WHIRLPOOL, TAG_WHIRLPOOL_LEN)) {
ciphertext += TAG_WHIRLPOOL_LEN;
s->hash_type = IS_WHIRLPOOL;
} else if (!strncmp(ciphertext, TAG_SHA512, TAG_SHA512_LEN)) {
ciphertext += TAG_SHA512_LEN;
s->hash_type = IS_SHA512;
#if SSE_GROUP_SZ_SHA512
s->loop_inc = SSE_GROUP_SZ_SHA512;
#endif
} else if (!strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN)) {
ciphertext += TAG_RIPEMD160_LEN;
s->hash_type = IS_RIPEMD160;
s->num_iterations = 2000;
} else {
// should never get here! valid() should catch all lines that do not have the tags.
fprintf(stderr, "Error, unknown type in truecrypt::get_salt(), [%s]\n", ciphertext);
error();
}
// Convert the hexadecimal salt in binary
for(i = 0; i < 64; i++)
s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
for(; i < 512; i++)
s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
p = ciphertext;
q = strchr(p, '$');
if (!q) /* no keyfiles */
return s;
// process keyfile(s)
p = q + 1;
s->nkeyfiles = atoi(p);
for (idx = 0; idx < s->nkeyfiles; idx++) {
p = strchr(p, '$') + 1; // at first filename
q = strchr(p, '$');
if (!q) { // last file
memset(tpath, 0, sizeof(tpath) - 1);
strncpy(tpath, p, sizeof(tpath));
} else {
memset(tpath, 0, sizeof(tpath) - 1);
strncpy(tpath, p, q-p);
}
/* read this into keyfiles_data[idx] */
fp = fopen(tpath, "rb");
if (!fp)
pexit("fopen %s", p);
if (fseek(fp, 0L, SEEK_END) == -1)
pexit("fseek");
sz = ftell(fp);
if (fseek(fp, 0L, SEEK_SET) == -1)
pexit("fseek");
if (fread(keyfiles_data[idx], 1, sz, fp) != sz)
pexit("fread");
keyfiles_length[idx] = sz;
fclose(fp);
}
return s;
}
/*
 * Mix all loaded keyfiles into the passphrase, TrueCrypt-style: each file
 * is folded through a rolling CRC-32, whose big-endian bytes are summed
 * into a KPOOL_SZ-byte pool that is finally added onto the passphrase.
 * The passphrase buffer must hold at least MAX_PASSSZ bytes.
 */
static int apply_keyfiles(unsigned char *pass, size_t pass_memsz, int nkeyfiles)
{
	int pass_len, file_idx;
	unsigned char *pool;
	unsigned char *data;
	int pool_pos;
	size_t n, data_len;
	uint32_t csum;
	if (pass_memsz < MAX_PASSSZ)
		error();
	/* Zero-pad the passphrase out to its fixed length. */
	pass_len = strlen((char *)pass);
	memset(pass + pass_len, 0, MAX_PASSSZ - pass_len);
	if ((pool = mem_calloc(1, KPOOL_SZ)) == NULL)
		error();
	for (file_idx = 0; file_idx < nkeyfiles; file_idx++) {
		pool_pos = 0;
		data_len = keyfiles_length[file_idx];
		data = keyfiles_data[file_idx];
		csum = ~0U;
		for (n = 0; n < data_len; n++) {
			/* Roll the CRC forward one byte, then scatter its
			 * four bytes (MSB first) into the pool. */
			csum = jtr_crc32(csum, data[n]);
			pool[pool_pos++] += (unsigned char)(csum >> 24);
			pool[pool_pos++] += (unsigned char)(csum >> 16);
			pool[pool_pos++] += (unsigned char)(csum >> 8);
			pool[pool_pos++] += (unsigned char)(csum);
			/* Wrap around */
			if (pool_pos == KPOOL_SZ)
				pool_pos = 0;
		}
	}
	/* Apply keyfile pool to passphrase */
	for (n = 0; n < KPOOL_SZ; n++)
		pass[n] += pool[n];
	MEM_FREE(pool);
	return 0;
}
/*
 * Derive an AES-XTS key from each candidate password via PBKDF2 (hash per
 * psalt->hash_type) and decrypt the first 16 bytes of the volume header
 * into first_block_dec[] for the cmp_* functions to inspect.
 * Keys are processed loop_inc at a time (SIMD group size for SHA-512).
 * NOTE: the #if/#else blocks below interlock with the trailing `else if`
 * chain -- under SSE the SIMD branch supplies the leading `if`.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i;
	const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(i = 0; i < count; i+=psalt->loop_inc)
	{
		unsigned char key[64];
#if SSE_GROUP_SZ_SHA512
		unsigned char Keys[SSE_GROUP_SZ_SHA512][64];
#endif
		int j;
		int ksz = strlen((char *)key_buffer[i]);
#if SSE_GROUP_SZ_SHA512
		/* SIMD builds handle SHA-512 separately below. */
		if (psalt->hash_type != IS_SHA512)
#endif
		{
			strncpy((char*)key, (char*)key_buffer[i], 64);
			/* process keyfile(s) */
			if (psalt->nkeyfiles) {
				apply_keyfiles(key, 64, psalt->nkeyfiles);
				ksz = 64;
			}
		}
#if SSE_GROUP_SZ_SHA512
		if (psalt->hash_type == IS_SHA512) {
			int lens[SSE_GROUP_SZ_SHA512];
			unsigned char *pin[SSE_GROUP_SZ_SHA512];
			/* union lets the SSE pbkdf2 write all outputs via one pointer */
			union {
				unsigned char *pout[SSE_GROUP_SZ_SHA512];
				unsigned char *poutc;
			} x;
			for (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {
				lens[j] = strlen((char*)(key_buffer[i+j]));
				strncpy((char*)Keys[j], (char*)key_buffer[i+j], 64);
				/* process keyfile(s) */
				if (psalt->nkeyfiles) {
					apply_keyfiles(Keys[j], 64, psalt->nkeyfiles);
					lens[j] = 64;
				}
				pin[j] = key_buffer[i+j];
				x.pout[j] = Keys[j];
			}
			pbkdf2_sha512_sse((const unsigned char **)pin, lens, psalt->salt, 64, psalt->num_iterations, &(x.poutc), sizeof(key), 0);
		}
#else
		if (psalt->hash_type == IS_SHA512) {
			pbkdf2_sha512((const unsigned char*)key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
		}
#endif
		else if (psalt->hash_type == IS_RIPEMD160)
			pbkdf2_ripemd160((const unsigned char*)key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
		else
			pbkdf2_whirlpool((const unsigned char*)key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
		/* Decrypt the first header block for every key in this group. */
		for (j = 0; j < psalt->loop_inc; ++j) {
#if SSE_GROUP_SZ_SHA512
			if (psalt->hash_type == IS_SHA512)
				memcpy(key, Keys[j], sizeof(key));
#endif
			// Try to decrypt using AES
			AES_XTS_decrypt(key, first_block_dec[i+j], psalt->bin, 16, 256);
		}
	}
	return count;
}
/* Return 1 if any decrypted first block starts with the "TRUE" magic. */
static int cmp_all(void* binary, int count)
{
	int idx;
	for (idx = 0; idx < count; ++idx)
		if (memcmp(first_block_dec[idx], "TRUE", 4) == 0)
			return 1;
	return 0;
}
/* Return 1 if this candidate's decrypted block starts with "TRUE". */
static int cmp_one(void* binary, int index)
{
	return memcmp(first_block_dec[index], "TRUE", 4) == 0;
}
// compare a BE string crc32, against crc32, and do it in a safe for non-aligned CPU way.
// this function is not really speed critical.
// given_crc32 holds the 4 CRC bytes big-endian; comp_crc32 is the native value.
static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) {
	return given_crc32[0] == ((comp_crc32>>24)&0xFF) &&
		given_crc32[1] == ((comp_crc32>>16)&0xFF) &&
		given_crc32[2] == ((comp_crc32>> 8)&0xFF) &&
		given_crc32[3] == ((comp_crc32>> 0)&0xFF);
}
/*
 * Full verification for one candidate: re-derive the key, decrypt the
 * remaining 448 header bytes, then check the "TRUE" magic plus the two
 * embedded CRC-32 values (96 bits of checks in total).
 */
static int cmp_exact(char *source, int idx)
{
#if 0
	/* fast-but-weak variant, disabled */
	if (!memcmp(first_block_dec[idx], "TRUE", 4) && !memcmp(&first_block_dec[idx][12], "\0\0\0\0", 4))
		return 1;
#else
	unsigned char key[64];
	unsigned char decr_header[512-64];
	CRC32_t check_sum;
#if DEBUG
	static int cnt;
	char fname[64];
	FILE *fp;
#endif
	/* Same key derivation as crypt_all (scalar path). */
	int ksz = strlen((char *)key_buffer[idx]);
	strncpy((char*)key, (char*)key_buffer[idx], 64);
	/* process keyfile(s) */
	if (psalt->nkeyfiles) {
		apply_keyfiles(key, 64, psalt->nkeyfiles);
		ksz = 64;
	}
	if (psalt->hash_type == IS_SHA512)
		pbkdf2_sha512(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
	else if (psalt->hash_type == IS_RIPEMD160)
		pbkdf2_ripemd160(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
	else
		pbkdf2_whirlpool(key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
	// we have 448 bytes of header (64 bytes unencrypted salt were the first 64 bytes).
	// decrypt it and look for 3 items.
	AES_XTS_decrypt(key, decr_header, psalt->bin, 512-64, 256);
	// first item we look for is a contstant string 'TRUE' in the first 4 bytes
	if (memcmp(decr_header, "TRUE", 4))
		return 0;
	// now we look for 2 crc values. At offset 8 is the first. This provided
	// CRC should be the crc32 of the last 256 bytes of the buffer.
	CRC32_Init(&check_sum);
	CRC32_Update(&check_sum, &decr_header[256-64], 256);
	if (!cmp_crc32s(&decr_header[8], ~check_sum))
		return 0;
	// now we compute crc of the first part of the buffer, up to 4 bytes less than
	// the start of that last 256 bytes (i.e. 188 bytes in total). Following this
	// buffer we compute crc32 over, should be a 4 byte block that is what we are
	// given as a match for this crc32 (of course, those 4 bytes are not part of
	// the crc32. The 4 bytes of provided crc32 is the only 4 bytes of the header
	// which are not placed into 'some' CRC32 computation.
	CRC32_Init(&check_sum);
	CRC32_Update(&check_sum, decr_header, 256-64-4);
	if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum))
		return 0;
#if DEBUG
	/* NOTE(review): debug-only path; fopen() result is not checked before fwrite */
	snprintf(fname, sizeof(fname), "tc_decr_header-%04d.dat", cnt++);
	fp = fopen(fname, "wb");
	fwrite(decr_header, 1, 512-64, fp);
	fclose(fp);
#endif
	// Passed 96 bits of tests. This is the right password!
	return 1;
#endif
	return 0;
}
/* Store one candidate password for slot 'index'.
 * NOTE(review): strcpy is unbounded -- presumably the caller limits key to
 * PLAINTEXT_LENGTH and key_buffer rows are sized accordingly; confirm. */
static void set_key(char* key, int index)
{
	strcpy((char*)(key_buffer[index]), key);
}
/* Return the stored candidate password for slot 'index'. */
static char *get_key(int index)
{
	char *stored = (char *)(key_buffer[index]);
	return stored;
}
/* Hash the 64-byte binary salt into a SALT_HASH_SIZE bucket index
 * (simple multiply-by-11 rolling accumulator). */
static int salt_hash(void *salt)
{
	struct cust_salt *cs = (struct cust_salt *)salt;
	unsigned acc = 0, idx;
	for (idx = 0; idx < 64; ++idx)
		acc = acc * 11 + cs->salt[idx];
	return acc & (SALT_HASH_SIZE - 1);
}
/* Tunable-cost accessor: expose the salt's hash_type (1:SHA512,
 * 2:RIPEMD160, 3:Whirlpool per the tunable-cost description above). */
static unsigned int tc_hash_algorithm(void *salt)
{
	struct cust_salt *cs = (struct cust_salt *)salt;
	return (unsigned int)cs->hash_type;
}
/* Combined format descriptor: accepts all three hash tags on one run
 * (positional fmt_main initializer -- field order must not change). */
struct fmt_main fmt_truecrypt = {
	{
		"tc_aes_xts", // FORMAT_LABEL
		"TrueCrypt AES256_XTS", // FORMAT_NAME
#if SSE_GROUP_SZ_SHA512
		"SHA512 " SHA512_ALGORITHM_NAME " /RIPEMD160/WHIRLPOOL",
#else
#if ARCH_BITS >= 64
		"SHA512 64/" ARCH_BITS_STR " /RIPEMD160/WHIRLPOOL",
#else
		"SHA512 32/" ARCH_BITS_STR " /RIPEMD160/WHIRLPOOL",
#endif
#endif
		"", // BENCHMARK_COMMENT
		-1, // BENCHMARK_LENGTH
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
#if SSE_GROUP_SZ_SHA512
		/* SIMD builds crypt SSE_GROUP_SZ_SHA512 keys per call */
		SSE_GROUP_SZ_SHA512,
		SSE_GROUP_SZ_SHA512,
#else
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#endif
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"hash algorithm [1:SHA512 2:RIPEMD160 3:Whirlpool]",
		},
		tests_all
	}, {
		/* method table (see fmt_main for slot meanings) */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_truecrypt,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			tc_hash_algorithm,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/* RIPEMD-160-only format descriptor (positional fmt_main initializer). */
struct fmt_main fmt_truecrypt_ripemd160 = {
	{
		"tc_ripemd160", // FORMAT_LABEL
		"TrueCrypt AES256_XTS", // FORMAT_NAME
		"RIPEMD160 32/" ARCH_BITS_STR, // ALGORITHM_NAME,
		"", // BENCHMARK_COMMENT
		-1, // BENCHMARK_LENGTH
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests_ripemd160
	}, {
		/* method table (see fmt_main for slot meanings) */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_ripemd160,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/* SHA-512-only format descriptor (positional fmt_main initializer). */
struct fmt_main fmt_truecrypt_sha512 = {
	{
		"tc_sha512", // FORMAT_LABEL
		"TrueCrypt AES256_XTS", // FORMAT_NAME
#if SSE_GROUP_SZ_SHA512
		"SHA512 " SHA512_ALGORITHM_NAME, // ALGORITHM_NAME,
#else
#if ARCH_BITS >= 64
		"SHA512 64/" ARCH_BITS_STR,
#else
		"SHA512 32/" ARCH_BITS_STR,
#endif
#endif
		"", // BENCHMARK_COMMENT
		-1, // BENCHMARK_LENGTH
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
#if SSE_GROUP_SZ_SHA512
		/* SIMD builds crypt SSE_GROUP_SZ_SHA512 keys per call */
		SSE_GROUP_SZ_SHA512,
		SSE_GROUP_SZ_SHA512,
#else
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#endif
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests_sha512
	}, {
		/* method table (see fmt_main for slot meanings) */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_sha512,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/* Whirlpool-only format descriptor (positional fmt_main initializer). */
struct fmt_main fmt_truecrypt_whirlpool = {
	{
		"tc_whirlpool", // FORMAT_LABEL
		"TrueCrypt AES256_XTS", // FORMAT_NAME
#if ARCH_BITS >= 64
		"WHIRLPOOL 64/" ARCH_BITS_STR, // ALGORITHM_NAME,
#else
		"WHIRLPOOL 32/" ARCH_BITS_STR, // ALGORITHM_NAME,
#endif
		"", // BENCHMARK_COMMENT
		-1, // BENCHMARK_LENGTH
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests_whirlpool
	}, {
		/* method table (see fmt_main for slot meanings) */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_whirlpool,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
fftw_parallel.c | #include "libraries.h"
/* Single-precision complex number: separate real and imaginary parts. */
typedef struct comp_comp {
	float Re;  // real part
	float Im;  // imaginary part
} complex;
/* Twiddle factor w^(ki) = e^(2*PI*k*i/n), returned as a complex value. */
complex omega(int n, int i, int k) {
	complex tw;
	double ang = k*i*2*PI/n;
	tw.Re = cos(ang);
	tw.Im = sin(ang);
	return tw;
}
/* Complex addition: returns c1 + c2. */
complex csum(complex c1, complex c2){
	complex out = { c1.Re + c2.Re, c1.Im + c2.Im };
	return out;
}
/* Complex multiplication: (a.Re + i a.Im)(b.Re + i b.Im). */
complex cmul(complex a, complex b){
	complex prod = {
		a.Re * b.Re - a.Im * b.Im,
		a.Re * b.Im + a.Im * b.Re
	};
	return prod;
}
/* Complex subtraction: returns c1 - c2. */
complex csub(complex c1, complex c2){
	complex out = { c1.Re - c2.Re, c1.Im - c2.Im };
	return out;
}
/* Reverse the low 'numbits' bits of 'inp' (bit 0 becomes bit numbits-1). */
int bitrev(int inp, int numbits)
{
	int result = 0;
	while (numbits-- > 0) {
		result <<= 1;
		result |= inp & 1;
		inp >>= 1;
	}
	return result;
}
/*
 * Parallel radix-2 DIF-style FFT of X into Y (n must be a power of two,
 * num_thr must divide n).  Each OpenMP thread owns a contiguous block of
 * elements; every stage double-buffers through S, and the final stage
 * writes bit-reversed indices into Y.  Returns elapsed wall time.
 */
double parallel_FFT(complex *X, complex *Y, long n,int num_thr) {
	//long ii, shift,prev_shift, l;
	long r, temp,m, i,w; //j, k, w, y, t, i2;
	//complex temp2, temp3, omeg;
	//complex t1;
	complex *R,*S,omeg;
	int start,end,element,border,tid,block,j,k;
	double en,bg,ext;
	// NOTE(review): malloc results are not checked before use
	R = (complex *) malloc(n*sizeof(complex));
	S = (complex *) malloc(n*sizeof(complex));
	/* Calculate r=logn with n=2^r */
	ext=0;
	bg=omp_get_wtime();
	omp_set_num_threads(num_thr);
	r=0;
	temp=n;
	while ( (n /= 2 ) != 0 ){
		r++;}
	n=temp;  // restore n after the log2 computation destroyed it
	//Calculate number of iterations without communication
	for (i=0; i<n; i++){
		R[i].Re = X[i].Re;
		R[i].Im = X[i].Im;
	}
	border=n/2;
	//r-d iterations with communication but since
	for (m=0; m<r; m++){
		//mb=pow(2,m);
		// Snapshot the previous stage so all reads see consistent data.
		for (i=0; i<n; i++){
			S[i].Re = R[i].Re;
			S[i].Im = R[i].Im;
			//printf("step %ld : S has %fl \n",m,S[i].Re);
		}
		block=n/num_thr;
#pragma omp parallel shared(S,border,block,m,r,n) private (element,start,end,tid,j,k,w,omeg)
		{
			tid=omp_get_thread_num();
			start=(tid)*block;
			end=start + block;
			for(element=start; element<end; element++){
				// j/k: butterfly partners -- same index with bit (r-m-1)
				// forced to 0 (j) and 1 (k)
				j=(element & (~(1 << (r-m-1)))) | (0 << (r-m-1));
				k=(element & (~(1 << (r-m-1)))) | (1 << (r-m-1));
				//Appropriate omega for each butterfly group
				w=bitrev(element,r);
				w =w << (r-1-m);
				// omega(n,-1,w) evaluates cos/sin at -2*PI*w/n,
				// i.e. the conjugate (forward-transform) twiddle
				omeg=omega(n,-1,w);
				if (element<k){
					//R[element].Re=S[j].Re +2*S[k].Re;
					R[element]=csum(S[j],cmul(omeg,S[k]));
					//printf("%lf + 2* %lf \n",S[j].Re,S[k].Re);
					//printf("Thread %d at end of step %ld local R %fl %fl, j: %d k: %d \n",tid,m,R[element].Re,R[element].Im,j,k);
				}
				else {
					//R[element].Re=S[k].Re +2*S[j].Re;
					R[element]=csum(S[k],cmul(omeg,S[j]));
					//printf("%lf + 2* %lf \n",S[k].Re,S[j].Re);
					//printf("Thread %d at end of step %ld local R %fl %fl , k: %d j: %d \n",tid,m,R[element].Re,R[element].Im,k,j);
				}
			}
			{
#pragma omp barrier
			}
			//At the end of the last step reverse indices
			if(m==r-1){
				for(element=start; element<end; element++){
					Y[element]=R[bitrev(element,r)];
				}
			}
		}
		border=border/2;
	}
	en=omp_get_wtime();
	ext=en-bg;
	//printf("Mean Time: %lf",en-bg);
	free(R);
	free(S);
	return ext;
}
//After the process is done reverse indices in parallel
/*
 * Benchmark driver: argv[1] = log2(size), argv[2] = log2(num_threads).
 * Runs parallel_FFT 100 times on random input and prints the mean time.
 */
int main(int argc, char** argv) {
	int size, i, num_thr;
	complex *X, *Y;
	double sum, mean, ext;
	//double start,end;
	/* Guard against missing arguments (previously dereferenced argv[1]
	 * and argv[2] unconditionally). */
	if (argc < 3) {
		printf("usage: %s <log2_size> <log2_threads>\n", argv[0]);
		return -1;
	}
	//Provide power of size at input
	size = pow(2,atoi(argv[1]));
	num_thr = pow(2,atoi(argv[2]));
	if (size < num_thr || num_thr==1) {
		printf("Non optimal partitioning.Exiting\n");
		return -1;
	}
	Y = (complex *) malloc(size*sizeof(complex));
	X = (complex *) malloc(size*sizeof(complex));
	if (X == NULL || Y == NULL) {
		printf("Memory allocation failed.Exiting\n");
		free(X);
		free(Y);
		return -1;
	}
	for(i=0;i<size;i++){
		X[i].Re=(float)rand();
		X[i].Im=(float)rand();
	}
	//for(i=0;i<size;i++) {
	//	printf("Input: %fl %fl\n",X[i].Re,X[i].Im); }
	sum=0;
	ext=0;
	/* 100 timed repetitions; report the mean. */
	for(i=0; i<100; i++){
		//start=omp_get_wtime();
		ext=parallel_FFT(X,Y,size,num_thr);
		//end=omp_get_wtime();
		sum=sum+ext;
	}
	mean=(double) sum/ (double) 100;
	printf("Time spent in parallel_FFT for %d elements with %d threads: %lf\n",size,num_thr,mean);
	//printf("Time spent in serial_FFT for %d elements: %.20lf\n",size,(double)(end-start)/ CLOCKS_PER_SEC);
	//Printing the values of Y for debugging
	//for(i=0;i<size;i++) {
	//printf("Result: %fl %fl\n",Y[i].Re,Y[i].Im); }
	free(X);
	free(Y);
	return 0;
}
GB_binop__second_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_fp64
// A.*B function (eWiseMult): GB_AemultB__second_fp64
// A*D function (colscale): GB_AxD__second_fp64
// D*A function (rowscale): GB_DxB__second_fp64
// C+=B function (dense accum): GB_Cdense_accumB__second_fp64
// C+=b function (dense accum): GB_Cdense_accumb__second_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_fp64
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_fp64
// C=A'+scalar GB_bind2nd_tran__second_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = bij
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP64 || GxB_NO_SECOND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros above (cij = bij).
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_Cdense_ewise3_noaccum__second_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out via GxB_NO_* flags
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C, parallelized over the
// precomputed k-first/k-last slices.
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_Cdense_accumB__second_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// (Auto-generated file: code intentionally left byte-identical.  The
// second `return (GrB_SUCCESS)` below is unreachable generator residue.)
GrB_Info GB_Cdense_accumb__second_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by diagonal matrix D.
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_AxD__second_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by diagonal matrix D.
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_DxB__second_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Free the per-matrix slice workspaces allocated by the add template.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the SECOND operator (cij = bij).
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_AaddB__second_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the SECOND operator (cij = bij).
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_AemultB__second_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y) with y bound as the second argument; for SECOND this is
// simply Cx[p] = y wherever the bitmap says an entry exists.  The stray
// `; ;` is the expansion of the empty GB_GETA macro.
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_bind2nd__second_fp64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ; ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// For SECOND the per-entry action reduces to Cx[pC] = y.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
// (Auto-generated file: code intentionally left byte-identical.)
GrB_Info GB_bind2nd_tran__second_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
2DConvolution.c | /**
* 2DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NI SIZE
#define NJ SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Reference CPU 3x3 convolution of A into B (interior points only;
 * the one-cell border of B is left untouched). */
void conv2D(DATA_TYPE *A, DATA_TYPE *B) {
  /* 3x3 stencil coefficients, same values and naming scheme as before */
  const DATA_TYPE k11 = +0.2, k21 = +0.5, k31 = -0.8;
  const DATA_TYPE k12 = -0.3, k22 = +0.6, k32 = -0.9;
  const DATA_TYPE k13 = +0.4, k23 = +0.7, k33 = +0.10;
  for (int i = 1; i < NI - 1; ++i) {
    const DATA_TYPE *above = &A[(i - 1) * NJ];
    const DATA_TYPE *row = &A[i * NJ];
    const DATA_TYPE *below = &A[(i + 1) * NJ];
    for (int j = 1; j < NJ - 1; ++j) {
      /* summation order kept identical to the original for
       * bit-exact float results */
      B[i * NJ + j] =
          k11 * above[j - 1] + k12 * row[j - 1] +
          k13 * below[j - 1] + k21 * above[j + 0] +
          k22 * row[j + 0] + k23 * below[j + 0] +
          k31 * above[j + 1] + k32 * row[j + 1] +
          k33 * below[j + 1];
    }
  }
}
/* GPU-offloaded 3x3 convolution: A is mapped to the device, B mapped back.
 * Only interior points are computed, so B's one-cell border is copied back
 * unwritten (compareResults only checks the interior, so this is benign). */
void conv2D_OMP(DATA_TYPE *A, DATA_TYPE *B) {
  DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
  c11 = +0.2;
  c21 = +0.5;
  c31 = -0.8;
  c12 = -0.3;
  c22 = +0.6;
  c32 = -0.9;
  c13 = +0.4;
  c23 = +0.7;
  c33 = +0.10;
  /* Offload the stencil loop; DEVICE_ID selects the target device. */
  #pragma omp target map(to : A[ : NI *NJ]) map(from : B[ : NI *NJ]) \
      device(DEVICE_ID)
  #pragma omp parallel for
  for (int i = 1; i < NI - 1; ++i) {
    for (int j = 1; j < NJ - 1; ++j) {
      B[i * NJ + j] =
          c11 * A[(i - 1) * NJ + (j - 1)] + c12 * A[(i + 0) * NJ + (j - 1)] +
          c13 * A[(i + 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] +
          c22 * A[(i + 0) * NJ + (j + 0)] + c23 * A[(i + 1) * NJ + (j + 0)] +
          c31 * A[(i - 1) * NJ + (j + 1)] + c32 * A[(i + 0) * NJ + (j + 1)] +
          c33 * A[(i + 1) * NJ + (j + 1)];
    }
  }
}
/* Fill A with uniform pseudo-random values in [0, 1]. */
void init(DATA_TYPE *A) {
  for (int r = 0; r < NI; ++r)
    for (int c = 0; c < NJ; ++c)
      A[r * NJ + c] = (float)rand() / RAND_MAX;
}
/* Count interior cells where CPU and GPU outputs differ by more than
 * ERROR_THRESHOLD percent; print and return that count. */
int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) {
  int mismatches = 0;
  for (int i = 1; i < (NI - 1); i++) {
    for (int j = 1; j < (NJ - 1); j++) {
      DATA_TYPE ref = B[i * NJ + j];
      DATA_TYPE got = B_GPU[i * NJ + j];
      if (percentDiff(ref, got) > ERROR_THRESHOLD)
        mismatches++;
    }
  }
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Driver: run the offloaded convolution, optionally validate against the
 * CPU reference, and return the number of mismatched cells. */
int main(int argc, char *argv[]) {
  double t_start, t_end, t_start_OMP, t_end_OMP;
  int fail = 0;
  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *B_OMP;
  A = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  B_OMP = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  /* Previously the allocations were used unchecked. */
  if (!A || !B || !B_OMP) {
    fprintf(stderr, "memory allocation failed\n");
    free(A);
    free(B);
    free(B_OMP);
    return 1;
  }
  fprintf(stdout, ">> Two dimensional (2D) convolution <<\n");
  // initialize the arrays
  init(A);
  t_start_OMP = rtclock();
  conv2D_OMP(A, B_OMP);
  t_end_OMP = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP); //);
#ifdef RUN_TEST
  t_start = rtclock();
  conv2D(A, B);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //);
  /* BUG FIX: the mismatch count was previously discarded, so main always
   * returned 0 even when the CPU and GPU results disagreed. */
  fail = compareResults(B, B_OMP);
#endif
  free(A);
  free(B);
  free(B_OMP);
  return fail;
}
|
num_procs.c | #include <stdio.h>
#include <omp.h>
/* Print the processor count once from the initial thread, then once more
 * from the master thread inside a parallel region. */
int main(void)
{
    printf("%d\n", omp_get_num_procs());
#pragma omp parallel
#pragma omp master
    {
        printf("%d\n", omp_get_num_procs());
    }
    return 0;
}
|
findSubGraphs.c | #include "defs.h"
/*
 * For each edge in maxIntWtList, run a path-length-limited parallel BFS
 * from its two endpoints, counting the vertices reached within
 * SubGraphPathLength hops.  Threads expand their share of each frontier
 * into private buffers (pS) which are then concatenated into the shared
 * frontier array S via a prefix sum over pSCount.  Per-vertex OpenMP
 * locks serialize the visited[] test-and-set.  Returns elapsed seconds.
 */
double findSubGraphs(graph* G,
		edge* maxIntWtList, int maxIntWtListSize) {
	VERT_T* S;            // shared frontier array, all phases concatenated
	LONG_T *start;        // start[p] = offset of phase p's frontier in S
	char* visited;
	LONG_T *pSCount;      // per-thread frontier sizes, prefix-summed
#ifdef _OPENMP
	omp_lock_t* vLock;    // one lock per vertex
#endif
	LONG_T phase_num, numPhases;
	LONG_T count;
	double elapsed_time = get_seconds();
	numPhases = SubGraphPathLength + 1;
#ifdef _OPENMP
	omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
	{
#endif
	VERT_T *pS, *pSt;
	LONG_T pCount, pS_size;
	LONG_T v, w, search_num;
	int tid, nthreads;
	LONG_T j, k, vert, n;
#ifdef _OPENMP
	LONG_T i;
	tid = omp_get_thread_num();
	nthreads = omp_get_num_threads();
#else
	tid = 0;
	nthreads = 1;
#endif
	n = G->n;
	/* Per-thread frontier buffer, grown by doubling when full. */
	pS_size = n/nthreads + 1;
	pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T));
	assert(pS != NULL);
	if (tid == 0) {
		S = (VERT_T *) malloc(n*sizeof(VERT_T));
		visited = (char *) calloc(n, sizeof(char));
		start = (LONG_T *) calloc((numPhases+2), sizeof(LONG_T));
		pSCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
#ifdef _OPENMP
		vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
	}
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
	for (i=0; i<n; i++) {
		omp_init_lock(&vLock[i]);
	}
#endif
	for (search_num=0; search_num<maxIntWtListSize; search_num++) {
#ifdef _OPENMP
#pragma omp barrier
#endif
		/* Run path-limited BFS in parallel */
		if (tid == 0) {
			/* Reset visitation state and seed the frontier with the
			 * edge's two endpoints. */
			free(visited);
			visited = (char *) calloc(n, sizeof(char));
			S[0] = maxIntWtList[search_num].startVertex;
			S[1] = maxIntWtList[search_num].endVertex;
			visited[S[0]] = (char) 1;
			visited[S[1]] = (char) 1;
			count = 2;
			phase_num = 1;
			start[0] = 0;
			start[1] = 1;
			start[2] = 2;
		}
#ifdef _OPENMP
#pragma omp barrier
#endif
		while (phase_num <= SubGraphPathLength) {
			pCount = 0;
#ifdef _OPENMP
#pragma omp for
#endif
			/* Expand the current frontier [start[p], start[p+1]). */
			for (vert=start[phase_num]; vert<start[phase_num+1]; vert++) {
				v = S[vert];
				for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
					w = G->endV[j];
					if (v == w)
						continue;
#ifdef _OPENMP
					/* Non-blocking lock: a vertex whose lock is held is
					 * being claimed by another thread this phase. */
					int myLock = omp_test_lock(&vLock[w]);
					if (myLock) {
#endif
						if (visited[w] != (char) 1) {
							visited[w] = (char) 1;
							if (pCount == pS_size) {
								/* Resize pS */
								pSt = (VERT_T *)
									malloc(2*pS_size*sizeof(VERT_T));
								memcpy(pSt, pS, pS_size*sizeof(VERT_T));
								free(pS);
								pS = pSt;
								pS_size = 2*pS_size;
							}
							pS[pCount++] = w;
						}
#ifdef _OPENMP
						omp_unset_lock(&vLock[w]);
					}
#endif
				}
			}
#ifdef _OPENMP
#pragma omp barrier
#endif
			pSCount[tid+1] = pCount;
#ifdef _OPENMP
#pragma omp barrier
#endif
			if (tid == 0) {
				/* Prefix-sum the per-thread counts to place each
				 * thread's chunk in the next frontier. */
				pSCount[0] = start[phase_num+1];
				for(k=1; k<=nthreads; k++) {
					pSCount[k] = pSCount[k-1] + pSCount[k];
				}
				start[phase_num+2] = pSCount[nthreads];
				count = pSCount[nthreads];
				phase_num++;
			}
#ifdef _OPENMP
#pragma omp barrier
#endif
			/* Copy the private frontier into its slot of S. */
			for (k = pSCount[tid]; k < pSCount[tid+1]; k++) {
				S[k] = pS[k-pSCount[tid]];
			}
#ifdef _OPENMP
#pragma omp barrier
#endif
		} /* End of search */
		if (tid == 0) {
			fprintf(stderr, "Search from <%ld, %ld>, number of vertices visited:"
					" %ld\n", (long) S[0], (long) S[1], (long) count);
		}
	} /* End of outer loop */
	free(pS);
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
	for (i=0; i<n; i++) {
		omp_destroy_lock(&vLock[i]);
	}
#pragma omp barrier
#endif
	if (tid == 0) {
		free(S);
		free(start);
		free(visited);
		free(pSCount);
#ifdef _OPENMP
		free(vLock);
#endif
	}
#ifdef _OPENMP
	}
#endif
	elapsed_time = get_seconds() - elapsed_time;
	return elapsed_time;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
/// Zero-initialized so that a default-constructed record (e.g. the cache
/// slot in FileNullabilityMap, or a fresh DenseMap entry) never exposes an
/// indeterminate value. The struct is already non-aggregate because of the
/// initializer below, so this does not change how it can be constructed.
uint8_t PointerKind = 0;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// Per-file nullability records, keyed by file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-entry cache holding the record of the most recently
/// requested file.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// On a cache miss, write the previously cached record back into the map
// (unless the cache was never populated) and load the requested file's
// record into the cache.
if (file != Cache.File) {
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
Cache.File = file;
Cache.Nullability = Map[file];
}
return Cache.Nullability;
}
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // We are about to link these declarations, so it is now safe to compute
  // the new declaration's linkage. If the old (possibly hidden) declaration
  // is visible, we link unconditionally.
  if (isVisible(Old))
    return true;
  // Otherwise, link only when the new declaration has external linkage:
  // linking it with the hidden decl (which also has external linkage)
  // keeps it externally visible. An internal-linkage declaration with no
  // previous decls simply remains internal, so we do not link it.
  return New->isExternallyVisible();
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// A stack of saved values for one MSVC-compatible #pragma (pack, vtordisp,
/// the segment pragmas, ...), plus the pragma's current value and the
/// location of the most recent pragma directive.
template<typename ValueType>
struct PragmaStack {
/// One saved stack entry: the optional label given in
/// #pragma <name>(push, <label>, ...), the value that was saved, and the
/// source location of the pragma that pushed it.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
/// Perform a pragma stack action (reset/set/push/pop, see
/// PragmaMsStackAction). Defined out of line.
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot carrying the current value; only
// PSK_Push and PSK_Pop are meaningful here (asserted below).
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors. The default value is also the initial current value.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
// Constructor/destructor are defined out of line; presumably they invoke
// SentinelAction(PSK_Push/PSK_Pop, SlotLabel) on each MS #pragma stack
// when ShouldAct is true — confirm in SemaAttr.cpp.
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;                // Sema whose pragma stacks are guarded.
StringRef SlotLabel;    // Label identifying the sentinel slot.
bool ShouldAct;         // If false, construction/destruction are no-ops.
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the stack of attributes that were pushed by
/// \#pragma clang attribute.
struct PragmaAttributeEntry {
// Location of the '#pragma clang attribute' directive that pushed this
// entry.
SourceLocation Loc;
// The attribute being applied while this entry is on the stack.
AttributeList *Attribute;
// Subject match rules restricting which declarations receive Attribute.
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
// Whether the attribute was applied to at least one declaration.
// NOTE(review): no default initializer — creators must set this
// explicitly (kept as-is so the struct stays an aggregate in C++11).
bool IsUsed;
};
SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack;
/// \brief The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
// Install the parser callbacks (and the opaque parser handle passed back to
// them) used to parse late-parsed templated functions on demand. Either
// callback may be null to clear it.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
// Opaque token returned by DelayedDiagnostics::push()/pushUndelayed(); it
// remembers the diagnostic pool that was active before, so the matching
// pop can restore it. Only DelayedDiagnostics may read the saved pool.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The pool currently collecting delayed diagnostics, or null when
/// diagnostics are emitted immediately instead of being delayed.
sema::DelayedDiagnosticPool *CurPool;
/// Install \p NewPool (possibly null) as the active pool and return a
/// state object remembering the pool that was active before.
DelayedDiagnosticsState swapPool(sema::DelayedDiagnosticPool *NewPool) {
DelayedDiagnosticsState Prior;
Prior.SavedPool = CurPool;
CurPool = NewPool;
return Prior;
}
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool, or null if diagnostics
/// are not currently delayed.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope: access and deprecation diagnostics will be
/// collected in \p pool until the returned state is popped.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
return swapPool(&pool);
}
/// Leave a delayed-diagnostic state that was previously pushed, without
/// emitting any of the collected diagnostics. This is the bookkeeping
/// half of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed at all.
DelayedDiagnosticsState pushUndelayed() {
return swapPool(nullptr);
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
// Pushes DC as the current declaration context for the lifetime of this
// object (restored by ContextRAII's destructor).
Sema::ContextRAII SavedContext;
public:
// Enter the state needed to synthesize a function body in context DC:
// push the decl context, a fresh function scope, and a
// PotentiallyEvaluated expression evaluation context.
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
// Pop the expression context and function scope in reverse order; the
// decl context is restored afterwards when SavedContext is destroyed.
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// \brief The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a hack; see the UnevaluatedAbstract
/// expression evaluation context (e.g. the SIZE operator in MS-style inline
/// assembly). NOTE(review): the original comment was truncated mid-sentence;
/// confirm the intended wording.
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// \brief The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
// NOTE(review): presumably holds the enclosing context's MaybeODRUseExprs
// while this context is active, restored on pop — confirm against
// Push/PopExpressionEvaluationContext in SemaExpr.cpp.
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
// Constructor: the containers above start empty; MangleNumbering starts
// null and is created lazily by getMangleNumberingContext().
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentCleanup(ParentCleanup),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
// True for all three unevaluated-operand flavors (plain, list, abstract);
// false for discarded/constant/potentially-evaluated contexts.
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract ||
Context == UnevaluatedList;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
// Outcome of overload resolution for the special member.
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// The resolved method (if any) and the Kind, with the Kind packed into
// the two low bits of the pointer.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
// The folding-set ID encodes the lookup key (record + member kind +
// qualifiers); method and kind are filled in via the setters below.
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,  // Default constructor.
CXXCopyConstructor,     // Copy constructor.
CXXMoveConstructor,     // Move constructor.
CXXCopyAssignment,      // Copy assignment operator.
CXXMoveAssignment,      // Move assignment operator.
CXXDestructor,          // Destructor.
CXXInvalid              // Not a special member.
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
/// RAII object that saves the Sema's floating-point options on entry and
/// restores them on scope exit. Used around compound statements so a
/// `#pragma STDC FP_CONTRACT` change is scoped to the statement.
class FPContractStateRAII {
public:
  /// Capture the current FP options so they can be restored later.
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  /// Restore the FP options captured at construction.
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  // A scope guard must have exactly one owner: copying one would restore
  // the saved state twice. Make it non-copyable (the reference member
  // already made assignment ill-formed; deleting both makes it explicit).
  FPContractStateRAII(const FPContractStateRAII &) = delete;
  FPContractStateRAII &operator=(const FPContractStateRAII &) = delete;

private:
  Sema& S;                       // Owning Sema whose FPFeatures we restore.
  FPOptions OldFPFeaturesState;  // Snapshot taken at construction.
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
// The Sema instance that will ultimately emit the diagnostic.
Sema &SemaRef;
// The diagnostic ID, retained so Sema can re-dispatch it on destruction.
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
// Forwards the value to the DiagnosticBuilder base, then returns the
// SemaDiagnosticBuilder so further << chaining keeps the derived type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
/// \brief Emit the diagnostic \p DiagID at \p Loc, wrapped in a
/// SemaDiagnosticBuilder so that Sema performs the final emission.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder BaseDiag = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(BaseDiag, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
/// \brief Retrieve the innermost function scope, or null when no function
/// scope is active.
///
/// The previous implementation unconditionally called
/// FunctionScopes.back(), which is undefined behavior when the scope stack
/// is empty; guard against that and return null instead.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
/// \brief Find the innermost enclosing scope that is not a block scope;
/// returns null when there is no such function scope.
sema::FunctionScopeInfo *getEnclosingFunction() const {
  // Walk from the innermost scope outward, skipping block scopes.
  for (auto It = FunctionScopes.rbegin(), End = FunctionScopes.rend();
       It != End; ++It) {
    if (!isa<sema::BlockScopeInfo>(*It))
      return *It;
  }
  return nullptr;
}
/// \brief Record in the current function that the weak object referenced by
/// \p E was used (read by default), unless we are in an unevaluated context,
/// where uses do not count.
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
  if (isUnevaluatedContext())
    return;
  getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
// Emit the diagnostic for type \p T at \p Loc using \p S.
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
// getPrintable - Overload set that normalizes values of assorted types into
// something a diagnostic builder can stream via operator<<. Used by
// BoundTypeDiagnoser below to forward arbitrary bound arguments.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A single location is printed as a (degenerate) source range.
static SourceRange getPrintable(SourceLocation L) { return L; }
// An expression is represented by its source range.
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
// TypeDiagnoser that binds a diagnostic ID plus a tuple of references to
// extra arguments, streaming them into the diagnostic when diagnose() runs.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
// References to the caller's arguments; the diagnoser must not outlive them.
std::tuple<const Ts &...> Args;
// Expand the tuple into the builder. The braced-initializer trick sequences
// the << operations left-to-right (elements of a braced list are evaluated
// in order, unlike ordinary function arguments).
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
// Emit the bound diagnostic at \p Loc, appending the offending type last.
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
IdentifierInfo *AttrName);
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
// One entry of ModuleScopes: the module currently being parsed together
// with the visible-module set captured from the enclosing scope.
// NOTE(review): OuterVisibleModules is presumably restored into
// VisibleModules when this scope is popped -- confirm at the pop site.
struct ModuleScope {
clang::Module *Module;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
VisibleModuleSet VisibleModules;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
/// Determine whether a declaration is visible to name lookup.
/// Fast path: a declaration that is not hidden is always visible;
/// otherwise defer to the slow visibility computation.
bool isVisible(const NamedDecl *D) {
  if (!D->isHidden())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
/// Determine whether any declaration of an entity is visible, optionally
/// collecting the modules that would need to be imported in \p Modules.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  if (isVisible(D))
    return true;
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
/// Check whether \p T is a complete type at \p Loc without emitting any
/// diagnostics (no diagnoser is supplied to the implementation).
bool isCompleteType(SourceLocation Loc, QualType T) {
  // The impl returns true when the requirement fails, so completeness is
  // its negation.
  bool Incomplete = RequireCompleteTypeImpl(Loc, T, nullptr);
  return !Incomplete;
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
// Variadic convenience wrapper: binds \p DiagID and the extra diagnostic
// arguments into a BoundTypeDiagnoser and defers to the TypeDiagnoser
// overload above.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
// Variadic convenience wrapper: forwards the extra diagnostic arguments
// through a BoundTypeDiagnoser to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
// Variadic convenience wrapper: forwards the extra diagnostic arguments
// through a BoundTypeDiagnoser to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
// Out-parameter describing whether a function/tag body may be skipped.
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
// True when parsing of the body can be skipped.
bool ShouldSkip;
// NOTE(review): presumably the prior declaration whose existing definition
// makes this body redundant -- confirm at the call sites that set it.
NamedDecl *Previous;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,              // The name could not be classified.
NC_Error,                // Classification failed.
NC_Keyword,              // The name is a keyword.
NC_Type,                 // The name names a type.
NC_Expression,           // The name resolves to an expression.
NC_NestedNameSpecifier,  // The name is a nested-name-specifier.
NC_TypeTemplate,         // The name is a type template.
NC_VarTemplate,          // The name is a variable template.
NC_FunctionTemplate      // The name is a function template.
};
// Discriminated result of ClassifyName(): carries the classification kind
// plus the payload for that kind. Which member is meaningful depends on
// Kind -- the accessors below assert the correspondence.
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;               // Valid when Kind == NC_Expression.
TemplateName Template;         // Valid for the *Template kinds.
ParsedType Type;               // Valid when Kind == NC_Type.
const IdentifierInfo *Keyword; // Valid when Kind == NC_Keyword.
// Payload-less construction; used only by the factory functions below.
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
// Map the template classification to the parser-facing TemplateNameKind.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(VarDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation Loc);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
bool canInitializeWithParenthesizedList(QualType TargetType);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// \brief Returns true if \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// The syntactic form of a parsed module-declaration.
enum class ModuleDeclKind {
  Module,         ///< 'module X;'
  Partition,      ///< 'module partition X;'
  Implementation, ///< 'module implementation X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics; do not reorder or renumber them without
/// updating the corresponding diagnostic text.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// \brief We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// \brief We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// \brief Retrieve a suitable printing policy, derived from the current
/// ASTContext and Preprocessor state (delegates to the static overload
/// declared below).
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
// NOTE: enumerator order is significant — it indexes the %select in the
// err_tag_reference_non_tag diagnostic, so do not reorder.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// The reason a tag name (struct/class/union/enum) appears at its point
/// of use; passed to ActOnTag() and related entry points.
enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy,
bool EnumUnderlyingIsImplicit,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
///
/// AMK_Redeclaration is the default used by mergeDeclAttributes().
enum AvailabilityMergeKind {
  /// \brief Don't merge availability attributes at all.
  AMK_None,
  /// \brief Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// \brief Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// \brief Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
bool Implicit,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, SourceRange Range,
StringRef Name, bool Override,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(SourceLocation Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
  AA_Assigning,        ///< A simple assignment.
  AA_Passing,          ///< Passing an argument.
  AA_Returning,        ///< Returning a value.
  AA_Converting,       ///< A conversion.
  AA_Initializing,     ///< Initializing a value.
  AA_Sending,          ///< Sending an Objective-C message.
  AA_Casting,          ///< An explicit cast.
  AA_Passing_CFAudited ///< Passing to a CF-audited API
                       ///< (NOTE(review): inferred from the name — verify
                       ///< against the diagnostic call sites).
};
/// C++ Overloading.
/// The result of checking whether a new function declaration overloads the
/// existing declarations found by lookup (see CheckOverload() below).
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,
  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,
  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
/// Passed to CheckConvertedConstantExpression() (declared below).
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf  ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  /// If true, suppress the diagnostics emitted by this converter.
  /// NOTE(review): inferred from the name — confirm against
  /// PerformContextualImplicitConversion.
  bool Suppress;
  /// If true, suppress only the "we picked a conversion function"
  /// diagnostic (see diagnoseConversion below).
  /// NOTE(review): inferred from the name — confirm at the use site.
  bool SuppressConversion;
  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}
  /// \brief Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;
  /// \brief Emits a diagnostic complaining that the expression's type does
  /// not satisfy this converter's filter (see match()).
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
  /// \brief Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
  /// \brief Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
  /// \brief Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
  /// \brief Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
  /// \brief Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
  /// \brief Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
  virtual ~ContextualImplicitConverter() {}
};
/// Converter for contextual conversions to an integral or (possibly scoped)
/// enumeration type. The generic "no match" diagnostic is forwarded to
/// diagnoseNotInt(), which subclasses must implement.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  /// Whether scoped enumeration types are acceptable destination types
  /// (presumably consulted by the match() override; see its definition).
  bool AllowScopedEnumerations;
public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}
  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }
  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of Objective-C container subscripting, as classified by
/// CheckSubscriptingKind() (declared below).
enum ObjCSubscriptKind {
  OS_Array,      ///< Array-style subscripting.
  OS_Dictionary, ///< Dictionary-style subscripting.
  OS_Error       ///< Not a valid subscript kind.
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
/// The kind of Objective-C literal, as classified by CheckLiteralKind()
/// (declared below).
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(ExprResult &SrcExpr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
/// The begin/end call was built successfully.
FRS_Success,
/// Overload resolution found no viable begin/end function.
FRS_NoViableFunction,
/// A diagnostic has already been issued; no further reporting is needed.
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload of CorrectDelayedTyposInExpr taking an explicit
/// \p Filter but no initializing declaration (forwards InitDecl == nullptr).
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// Convenience overload of CorrectDelayedTyposInExpr taking an ExprResult.
///
/// An invalid \p ER is returned unchanged; otherwise the contained Expr is
/// processed for TypoExprs. \p InitDecl names a variable whose initializer is
/// being corrected, so it can be excluded from candidate corrections.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Fix: InitDecl was previously dropped here (the call forwarded only
  // ER.get() and Filter), so the declaration was never excluded from its own
  // initializer's typo corrections. Forward it explicitly.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload of CorrectDelayedTyposInExpr taking an ExprResult and
/// an explicit \p Filter, with no initializing declaration.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive,
bool allowArrayTypes,
bool implicit,
bool overrideExisting = false);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// the property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations must agree when their
/// types are compared (see MatchTwoMethodDeclarations).
enum MethodMatchStrategy {
/// Permit looser type agreement between the two declarations.
MMS_loose,
/// Require the two declarations' types to match exactly.
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
// Remember this (identifier, location) pair so the same failed correction
// is not retried later, unless the caller opts out of recording.
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but adds the method to the factory (non-instance) side of the pool.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for the
/// given selector and warns if there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory method for the
/// given selector and warns if there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// \brief A thin wrapper around an Expr*, produced by Sema::MakeFullExpr from
/// a finished full-expression and handed to statement-building routines.
class FullExprArg {
public:
// Default-construct an empty (null) argument.
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
// Hand the wrapped expression back to the caller as an ExprResult.
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
/// Wrap \p Arg as a full-expression, using the expression's own source
/// location (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Finish \p Arg as a full-expression at location \p CC and wrap the result.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
/// Finish \p Arg as a full-expression whose value is discarded (as in an
/// expression statement) and wrap the result in a FullExprArg.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and
/// ActOnFinishOfCompoundStmt on destruction.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disable() has
/// been called first.
struct FunctionScopeRAII {
Sema &S;
// Whether the destructor should still pop the scope.
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
// Prevent the destructor from popping the function scope.
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// Describes why a C++ for-range statement is being built, controlling
/// whether typo-correction and other irreversible actions are attempted.
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowParamOrMoveConstructible);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowParamOrMoveConstructible);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
llvm::InlineAsmIdentifierInfo &Info,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// \brief Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Enter a declaration-parsing state whose delayed diagnostics are
/// collected into \p pool; must be matched by PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Begin a class-parsing state; paired with PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
/// Restore the delayed-diagnostic state saved by the matching
/// PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void EmitAvailabilityWarning(AvailabilityResult AR, NamedDecl *D,
StringRef Message, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// \brief Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;          // Scope in which the member access appears.
  UnqualifiedId &Id; // The member name as written.
  Decl *ObjCImpDecl; // Forwarded to ActOnMemberAccessExpr; see its callers.
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One component of a __builtin_offsetof designator: either a member
/// access (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd; // Source range of this component.
  bool isBrackets;                 // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo; // Active when !isBrackets.
    Expr *E;                   // Active when isBrackets.
  } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
///
/// Returned by the CheckMicrosoftIfExistsSymbol overloads below.
enum IfExistsResult {
  /// \brief The symbol exists.
  IER_Exists,
  /// \brief The symbol does not exist.
  IER_DoesNotExist,
  /// \brief The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,
  /// \brief An error occurred.
  IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList,
UsingDirectiveDecl * &UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
bool HasTypenameKeyword,
SourceLocation TypenameLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
SourceLocation EllipsisLoc,
AttributeList *AttrList,
bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
SourceLocation EllipsisLoc,
AttributeList *AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Held by pointer (not reference) so instances remain copy-assignable.
  Sema *Self;
  // Exception specifications are ordered from most to least restrictive:
  //   noexcept (C++11 only), then throw(), then throw(collected exceptions),
  //   and finally no specification at all, expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Discard every exception type collected so far.
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self),
        // Outside C++11 there is no noexcept; start from throw() instead.
        ComputedEST(Self.getLangOpts().CPlusPlus11 ? EST_BasicNoexcept
                                                   : EST_DynamicNone) {}

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    switch (ESI.Type) {
    case EST_Dynamic:
      ESI.Exceptions = Exceptions;
      break;
    case EST_None:
      // C++11 [except.spec]p14: the exception-specification is
      // noexcept(false) if the set of potential exceptions of the
      // special member function contains "any".
      ESI.Type = EST_ComputedNoexcept;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      break;
    default:
      break;
    }
    return ESI;
  }
};
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Previous value of S.CXXThisTypeOverride, restored by the destructor.
  QualType OldCXXThisTypeOverride;
  // When false, the scope is a no-op and the override is left untouched.
  bool Enabled;
public:
  /// \brief Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                   bool Enabled = true);
  ~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, taking the context location from the
/// expression itself (invalid location when the expression is null).
/// Delegates to the overload below with its default flags.
ExprResult ActOnFinishFullExpr(Expr *Expr) {
  SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CC);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// \brief Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// \brief The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;
  /// \brief The identifier preceding the '::'.
  IdentifierInfo *Identifier;
  /// \brief The location of the identifier.
  SourceLocation IdentifierLoc;
  /// \brief The location of the '::'.
  SourceLocation CCLoc;

  /// \brief Creates info object for the most typical case: the object type
  /// is absent (or already wrapped as a ParsedType).
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }

  /// \brief Convenience overload that wraps a QualType object type into a
  /// ParsedType before storing it.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
    LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Any init-capture kind other than copy-init is a direct-initialization.
  const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureTy =
      buildLambdaInitCaptureInitialization(Loc, ByRef, Id, DirectInit, Init);
  return ParsedType::make(CaptureTy);
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// \brief Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// \brief Outcome of a C++ access-control check.
enum AccessResult {
AR_accessible,   ///< The entity is accessible.
AR_inaccessible, ///< The entity is not accessible.
AR_dependent,    ///< NOTE(review): presumably accessibility depends on a
                 ///< template parameter and must be rechecked at
                 ///< instantiation -- confirm at use sites.
AR_delayed       ///< NOTE(review): presumably the check was deferred (see
                 ///< HandleDelayedAccessCheck) -- confirm.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// \brief The context in which an abstract class type was used; selects
/// the wording of the resulting diagnostic.
/// NOTE(review): the explicit -1 start suggests the remaining values feed
/// a diagnostic %select -- confirm against the diagnostic definitions.
enum AbstractDiagSelID {
AbstractNone = -1,           ///< No specific context.
AbstractReturnType,          ///< Abstract type used as a return type.
AbstractParamType,           ///< Abstract type used as a parameter type.
AbstractVariableType,        ///< Abstract type used as a variable's type.
AbstractFieldType,           ///< Abstract type used as a field's type.
AbstractIvarType,            ///< Abstract type used as an Objective-C ivar's type.
AbstractSynthesizedIvarType, ///< Abstract type used as a synthesized ivar's type.
AbstractArrayType            ///< Abstract type used in an array type.
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Convenience overload: packages \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload
/// above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> BoundDiagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<Decl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// \brief The context in which we are checking a template parameter list
/// (consumed by CheckTemplateParameterList below).
enum TemplateParamListContext {
TPC_ClassTemplate,                    ///< Parameter list of a class template.
TPC_VarTemplate,                      ///< Parameter list of a variable template.
TPC_FunctionTemplate,                 ///< Parameter list of a function template.
TPC_ClassTemplateMember,              ///< Parameter list of a class template member.
TPC_FriendClassTemplate,              ///< Parameter list of a friend class template.
TPC_FriendFunctionTemplate,           ///< Parameter list of a friend function template.
TPC_FriendFunctionTemplateDefinition, ///< Friend function template that is a definition.
TPC_TypeAliasTemplate                 ///< Parameter list of an alias template.
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked (passed as the CTAK parameter of the
/// CheckTemplateArgument overloads below).
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality (consumed by TemplateParameterListsAreEqual below).
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
// Do not reorder: enumerator values must stay in sync with the
// %select ordering of the err_unexpanded_parameter_pack diagnostic.
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// \brief After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// \brief CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
// Members mirror the constructor parameters one-to-one; this struct is
// a plain record of one (parameter type, argument) pair from the call.
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// \brief Mark the template parameters deduced for \p FunctionTemplate in
/// \p Deduced.
///
/// Forwards to the static MarkDeducedTemplateParameters overload,
/// supplying this semantic-analysis object's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing.
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The substituted template arguments (TemplateArgs /
/// NumTemplateArgs) as an ArrayRef.
ArrayRef<TemplateArgument> template_arguments() const {
return {TemplateArgs, NumTemplateArgs};
}
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
// Two records are equal only when Kind and Entity match; which
// additional fields are compared depends on the Kind.
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex; // Index in effect before this object was built.
public:
// Saves Self's current ArgumentPackSubstitutionIndex and installs
// NewSubstitutionIndex for the lifetime of this object.
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
// Restores the index saved at construction.
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
// Tag type used only to select the exception-specification constructor
// overload below.
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// \brief Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid; // True if recording this instantiation failed (see isInvalid()).
bool AlreadyInstantiating; // Backs isAlreadyInstantiating().
// Saved value of Sema::InNonInstantiationSFINAEContext — presumably
// restored by Clear(); confirm in the implementation.
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// Private constructor taking the full set of instantiation-record fields;
// the public overloads above forward varying subsets of these arguments.
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
// Non-copyable: each object corresponds to exactly one stack entry.
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
/// \brief Print notes describing the current stack of active template
/// instantiations (used when emitting instantiation-related diagnostics).
void PrintInstantiationStack();
/// \brief Print a note identifying the point at which a '#pragma clang
/// attribute' induced instantiation occurred.
void PrintPragmaAttributeInstantiationPoint();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
// Asserts: callers must only query this while at least one expression
// evaluation context is active.
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII object that traps SFINAE errors occurring during template
/// argument deduction.
///
/// On construction it snapshots the Sema's SFINAE-related state; on
/// destruction it restores that snapshot. In between, hasErrorOccurred()
/// reports whether any SFINAE errors were recorded.
class SFINAETrap {
  Sema &SemaRef;
  unsigned SavedSFINAEErrors;
  bool SavedInNonInstantiationSFINAEContext;
  bool SavedAccessCheckingSFINAE;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), SavedSFINAEErrors(SemaRef.NumSFINAEErrors),
        SavedInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        SavedAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) {
    // If we were not already inside a SFINAE context, note that
    // substitution failures here occur outside of an instantiation.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of state we saved at construction.
    SemaRef.NumSFINAEErrors = SavedSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedSFINAEErrors;
  }
};
/// \brief RAII object marking a region of provisional semantic analysis:
/// while it is alive, typo-correction is disabled and diagnostics in the
/// immediate context (not within implicitly-instantiated templates) are
/// suppressed, so the validity of a construct can be probed cheaply.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool SavedDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    // Turn typo correction off for the duration of the scope.
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
// Small-size-optimized set of source locations (common case: one or two
// failure locations per identifier).
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
// Cache used by the thread-safety analysis machinery.
// NOTE(review): populated/consumed outside this chunk — confirm ownership.
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// \brief RAII object that, when enabled, stashes the current sets of
/// pending implicit instantiations and vtable uses aside, and puts them
/// back on destruction (asserting that the intervening code fully drained
/// whatever it produced).
class SavePendingInstantiationsAndVTableUsesRAII {
public:
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (Enabled) {
      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    if (!Enabled)
      return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index,
///
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
/// \brief Perform queued implicit template instantiations.
/// \param LocalOnly presumably restricts processing to the local-scope
/// queue — confirm against the definition.
void PerformPendingInstantiations(bool LocalOnly = false);
/// Substitute template arguments into a type (TypeSourceInfo form).
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
/// Substitute template arguments into a type (QualType form).
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
/// Substitute template arguments into a type (TypeLoc form).
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
/// Substitute template arguments into the type of a function declaration;
/// ThisContext/ThisTypeQuals describe the enclosing 'this' for members.
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
/// Substitute template arguments into the exception specification of Proto
/// when producing the function New.
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
/// Substitute template arguments into a function parameter declaration.
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
/// Substitute template arguments into a list of parameter types;
/// \returns true on error.
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
/// Substitute template arguments into an expression.
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
/// Substitute template arguments into a statement.
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into a declaration, placing the result
/// in the given owner context.
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into an initializer expression.
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
/// Substitute template arguments into the base specifiers of Pattern when
/// instantiating Instantiation; \returns true on error.
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate a class from its pattern; \returns true on error.
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Instantiate an enumeration from its pattern; \returns true on error.
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
/// Instantiate an in-class member initializer from its pattern;
/// \returns true on error.
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief An attribute whose instantiation has been deferred, together with
/// the scope and declaration it should eventually be instantiated for.
struct LateInstantiatedAttribute {
const Attr *TmplAttr;          // The attribute from the template pattern.
LocalInstantiationScope *Scope; // Scope in which to instantiate.
Decl *NewDecl;                 // Declaration the attribute attaches to.
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
/// Instantiate the attributes of Pattern onto Inst; attributes that must be
/// deferred are appended to LateAttrs when provided.
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
/// Variant of InstantiateAttrs used for declarations.
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
/// Instantiate a class template specialization; \returns true on error.
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Instantiate the members of an already-instantiated class.
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
/// Instantiate the members of a class template specialization.
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
/// Substitute template arguments into a nested-name-specifier.
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into a declaration name.
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into a template name.
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into a list of template argument locations;
/// \returns true on error.
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the exception specification of the given function.
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
/// Instantiate the definition of the given function from its template.
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
/// Build a variable template specialization from the given template and
/// converted arguments.
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
/// Complete a previously-created variable template specialization.
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Fill in the instantiated variable NewVar from OldVar.
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
/// Instantiate the initializer of a variable from its pattern.
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the definition of the given variable from its template.
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
/// Instantiate the definition of a static data member from its template.
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
/// Instantiate the member initializers of a constructor from its template.
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Find the instantiation of the given declaration within the current
/// instantiation.
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Find the instantiation of the given declaration context within the
/// current instantiation.
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// \brief The kind of Objective-C container the parser is currently inside,
/// if any. OCK_None (-1) is the sentinel for "not in a container".
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
/// Determine the kind of the current Objective-C container, if any.
ObjCContainerKind getObjCContainerKind() const;
/// Act on a single Objective-C type parameter (e.g. within \<T : Bound\>).
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
/// Act on a complete Objective-C type parameter list.
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
/// Pop an Objective-C type parameter list off the given scope.
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
/// Act on the start of an \@interface declaration.
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
/// Act on the superclass portion of a class interface.
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
/// Act on protocols referenced through a typedef'd superclass name.
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
/// Act on an \@compatibility_alias declaration.
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
/// Check a forward protocol declaration for circular dependencies.
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
/// Act on the start of an \@protocol definition.
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
/// Act on the start of a category interface.
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
/// Act on the start of an \@implementation for a class.
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
/// Act on the start of an \@implementation for a category.
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
/// Act on the end of an Objective-C implementation.
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
/// Act on a forward \@class declaration.
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
/// Act on a forward \@protocol declaration.
/// (Fixed parameter-name typo: "AtProtoclLoc" -> "AtProtocolLoc", matching
/// the "Protocol" spelling used by every sibling declaration; prototype
/// parameter names are not part of the call interface.)
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
AttributeList *attrList);
/// Resolve a list of protocol identifiers to their declarations.
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
/// Diagnose a mix of type arguments and protocol qualifiers.
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
/// Diagnose a mismatch between a property and the one it overrides.
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
/// Diagnose duplicate methods between a class extension and its interface.
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
/// Act on the \@end of an Objective-C container.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
/// Act on an \@property declaration.
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// Act on an \@synthesize or \@dynamic property implementation.
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// \brief Categories of Objective-C methods with special semantics
/// (allocation, copying, initialization families).
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// \brief Parser-supplied information about one argument of an Objective-C
/// method declaration.
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
/// Act on an Objective-C method declaration.
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
/// Look up a method in the protocols qualifying the given object
/// pointer type.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
/// Look up a method in the given Objective-C object type.
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
/// Check an Objective-C method declaration under ARC rules.
bool CheckARCMethodDecl(ObjCMethodDecl *method);
/// Infer an ARC lifetime for the given declaration.
bool inferObjCARCLifetime(ValueDecl *decl);
/// Build a property reference expression on the given base expression.
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
/// Build a class-level property reference (Receiver.property).
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
/// Capture 'self' for use in a block or lambda, if possible.
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
/// Classify the kind of message send beginning with the given identifier.
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
/// Act on a message send to 'super'.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build a class message send.
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
/// Build an implicit class message send.
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
/// Act on a class message send written in source.
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an instance message send.
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
/// Build an implicit instance message send.
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
/// Act on an instance message send written in source.
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an Objective-C bridged cast expression.
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
/// Act on an Objective-C bridged cast written in source.
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
/// Check a toll-free-bridge cast.
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
/// Check an objc_bridge_related cast.
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
/// Check a toll-free-bridge static cast; \returns whether it applies.
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
/// Resolve the components of an objc_bridge_related conversion.
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
/// Check (and possibly rewrite) an objc_bridge_related conversion.
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
/// Check a conversion to an Objective-C string literal type.
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
/// Check a method in the 'init' family.
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
/// Check the overrides of the given Objective-C method.
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// \brief The alignment mode requested by '#pragma options align'.
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// \brief The kind of section governed by an MS section-related pragma.
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// \brief Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well-formed '\#pragma clang attribute push'.
void ActOnPragmaAttributePush(AttributeList &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
/// \brief Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
/// \brief Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E);
ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Returns the current OpenCL extension name (empty string if none is
/// active).
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Sets the current OpenCL extension to \p Ext.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// \brief Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// \brief Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// \brief Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// \brief Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// \brief Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// \brief Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const Decl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Set to true inside '#pragma omp declare target' region.
bool IsInOpenMPDeclareTargetContext = false;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
/// Verifies that \p Op is a valid integer constant expression for the OpenMP
/// clause \p CKind. When \p StrictlyPositive is true the value must be
/// strictly positive (presumably zero is also accepted otherwise — see the
/// implementation).
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// \brief Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level);
/// \brief Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *IsOpenMPCapturedDecl(ValueDecl *D);
/// Builds an expression that refers to the OpenMP-captured variable
/// \p Capture with value kind \p VK and object kind \p OK at location \p Loc.
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// \brief Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level);
/// \brief Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// \brief Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer);
/// \brief Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region, i.e. '\#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D);
/// Return true if inside an OpenMP 'declare target' region.
bool isInOpenMPDeclareTargetContext() const {
return IsInOpenMPDeclareTargetContext;
}
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// \brief Called on a well-formed OpenMP executable directive of kind
/// \p Kind after parsing of its clauses \p Clauses and associated statement
/// \p AStmt; dispatches to the directive-specific ActOnOpenMP*Directive
/// handlers.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// \brief Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// \brief Called on a well-formed OpenMP clause of kind \p Kind that carries
/// a single expression argument \p Expr (e.g. 'if', 'final', 'num_threads').
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// \brief Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on a well-formed OpenMP clause of kind \p Kind that takes a
/// single enumeration-like argument \p Argument located at \p ArgumentLoc
/// (e.g. 'default', 'proc_bind').
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on a well-formed OpenMP clause of kind \p Kind that takes
/// both enumeration-like arguments \p Arguments and an expression \p Expr
/// (e.g. 'schedule').
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// \brief Called on a well-formed argument-less OpenMP clause of kind
/// \p Kind (e.g. 'nowait', 'untied', 'mergeable').
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on a well-formed OpenMP clause of kind \p Kind that carries
/// a variable list \p Vars (private, shared, reduction, linear, depend, map,
/// etc.); the remaining parameters supply kind-specific modifiers and their
/// source locations and are meaningful only for the matching clause kinds.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// \brief Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
/// Classifies the callee of a variadic call so that diagnostics can
/// describe it accurately.
enum VariadicCallType {
/// Callee is a variadic function.
VariadicFunction,
/// Callee is a block.
VariadicBlock,
/// Callee is an Objective-C method.
VariadicMethod,
/// Callee is a constructor.
VariadicConstructor,
/// The call is not variadic; no variadic-argument warning applies.
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
/// Passing an argument of this type to a vararg function is valid.
VAK_Valid,
/// Valid only in C++11 and later.
VAK_ValidInCXX11,
/// Passing this type has undefined behavior.
VAK_Undefined,
/// Undefined behavior that MSVC nonetheless accepts -- NOTE(review):
/// confirm the exact diagnostic policy in checkVariadicArgument.
VAK_MSVCUndefined,
/// Passing this type is invalid (ill-formed).
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Overload of FindCompositePointerType taking ExprResult operands.
/// Unwraps both operands, delegates to the Expr *& overload above (which
/// may rewrite the operand expressions), then stores the possibly-updated
/// expressions back into \p E1 and \p E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// \brief Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The (possibly invalid) result of semantic analysis of a condition
/// expression: bundles the condition variable (if any), the converted
/// condition expression, and -- for constexpr conditions -- the value
/// known at compile time.
class ConditionResult {
// Variable declared in the condition, or null if none was declared.
Decl *ConditionVar;
// The converted condition expression.
FullExprArg Condition;
// True if semantic analysis of the condition failed.
bool Invalid;
// True when the condition's value was evaluated at compile time
// (constexpr conditions only; see the private constructor below).
bool HasKnownValue;
// The evaluated value; meaningful only when HasKnownValue is true.
bool KnownValue;
friend class Sema;
// Note: member initializers run in declaration order, and KnownValue's
// initializer reads HasKnownValue -- keep these members in this order.
// The short-circuiting && chains also guarantee EvaluateKnownConstInt is
// only called on a non-null, non-value-dependent condition.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
// Builds an empty result marked invalid; used by ConditionError().
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (null if none was declared) and the
/// condition expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the compile-time value of the condition, or None if it was
/// not evaluated at compile time.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, diagnostics are suppressed -- NOTE(review): confirm exact
// handling in VerifyIntegerConstantExpression.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Diagnose an expression that is not an integer constant expression,
/// at \p Loc covering \p SR. Must be overridden.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
/// Diagnose an expression that is not a formal ICE -- presumably one
/// that only constant-folds (cf. the AllowFold parameter of
/// VerifyIntegerConstantExpression). Has a default implementation.
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
CUDADeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
// The canonical declaration of the function.
CanonicalDeclPtr<FunctionDecl> FD;
// Location associated with the function, e.g. of the offending call
// (see LocsWithCUDACallDiags and CUDAKnownEmittedFns below).
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
CUDAKnownEmittedFns;
/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
CUDACallGraph;
/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class CUDADiagBuilder {
public:
/// How the diagnostic requested from this builder should be emitted.
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
// NOTE(review): final emission/attachment of the diagnostic presumably
// happens in the destructor, once all streamed values are collected --
// confirm in SemaCUDA.cpp.
~CUDADiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (CUDADiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a CUDADiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
/// Streams \p Value into whichever diagnostic is active (immediate or
/// partial); a no-op when neither is set (K_Nop).
template <typename T>
friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiag.hasValue())
*Diag.PartialDiag << Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
// Whether a call stack should accompany the diagnostic -- presumably set
// by the constructor for the kinds documented above as emitting one.
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<PartialDiagnostic> PartialDiag;
};
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
// Which compilation "side(s)" a function belongs to, per its CUDA
// host/device attributes (see IdentifyCUDATarget below; a null decl
// is classified as CFT_Host).
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const AttributeList *Attr);
/// \brief Returns the CUDA target of the context currently being parsed.
/// When CurContext is not a FunctionDecl the cast yields null, which
/// IdentifyCUDATarget classifies as CFT_Host.
CUDAFunctionTarget CurrentCUDATarget() {
FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
return IdentifyCUDATarget(CurFn);
}
// CUDA function call preference. Must be ordered numerically from
// worst to best so values can be compared directly with </>.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartImpl(CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinMSVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Type information recorded for a magic type-tag constant (see
/// RegisterTypeTagForDatatype above).
struct TypeTagData {
// Value-initialize the flag bitfields: the original empty body left them
// indeterminate, so reading a default-constructed entry was UB.
TypeTagData() : LayoutCompatible(0), MustBeNull(0) {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
/// The type the tagged argument is checked against.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// Mirrors the MustBeNull flag passed to RegisterTypeTagForDatatype.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } // Parser-owned; see caveats above.
// Forwards to the current parser scope's MS mangling-number counter.
void incrementMSManglingNumber() const {
CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
// Lexical context for the current declaration: the saved original context
// when one is set, otherwise the semantic CurContext.
DeclContext *getCurLexicalContext() const {
if (OriginalLexicalContext)
return OriginalLexicalContext;
return CurContext;
}
/// \brief The diagnostic we should emit for \c D, or \c AR_Available.
///
/// \param D The declaration to check. Note that this may be altered to point
/// to another declaration that \c D gets it's availability from. i.e., we
/// walk the list of typedefs to find an availability attribute.
///
/// \param Message If non-null, this will be populated with the message from
/// the availability attribute that is selected.
AvailabilityResult ShouldDiagnoseAvailabilityOfDecl(NamedDecl *&D,
std::string *Message);
// Like getCurLexicalContext(), but maps an Objective-C category onto the
// interface it extends, since a category implicitly carries the
// interface's attributes.
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *Ctx = getCurLexicalContext();
if (const ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
return Category->getClassInterface();
return Ctx;
}
/// \brief To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// During code completion we may be just past a comma, so the argument
// about to be typed counts as one extra.
const size_t EffectiveArgs =
(PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
/// \brief Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
// Equality is keyed on the expression pointer only. Const-qualified so it
// can be invoked on const objects (the original non-const overload could
// not), which is backward-compatible for all existing callers.
bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// \brief Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// \brief Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// \brief Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// \brief This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// \brief This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
// Tracks whether the constructor actually pushed a context, so the
// destructor pops only what was pushed.
bool Entered = true;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
// This overload always enters, reusing the enclosing lambda's context decl.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(Sema::UnevaluatedList, nullptr,
false);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// \brief The token stream captured for later parsing.
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Sentinel keys: the FunctionDecl sentinels paired with a default
// (invalid) SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Mix the decl hash with the raw location so the same function at two
// locations hashes differently.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
a.35.3.c | /* { dg-do compile } */
void work (int, int);
/* Negative test: a 'single' region nested directly inside a worksharing
 * 'for' region is invalid OpenMP nesting; the compiler must warn.
 * The dg-* directive comments drive the test harness -- do not edit them. */
void
wrong3 (int n)
{
#pragma omp parallel default(shared)
{
int i;
#pragma omp for
for (i = 0; i < n; i++)
{
/* incorrect nesting of regions */
#pragma omp single /* { dg-warning "may not be closely nested" } */
work (i, 0);
}
}
}
|
reference.h |
/* Derivative of the phase-field free energy with respect to phi:
 *   dF/dphi = -phi*(1 - phi^2) + lambda*u*(1 - phi^2)^2
 * Factoring out g = 1 - phi^2 keeps the arithmetic bit-identical to the
 * expanded form while evaluating the common subexpression once. */
double dFphi_ref(double phi, double u, double lambda)
{
const double g = 1.0 - phi * phi;
return lambda * u * g * g - phi * g;
}
/* Second-order central difference d(phi)/dx at grid point (x,y,z).
 * No bounds check: x must be an interior index (x-1 and x+1 valid).
 * dy and dz are unused; kept so all three gradients share a signature. */
double GradientX_ref(double phi[][DATAYSIZE][DATAXSIZE],
double dx, double dy, double dz, int x, int y, int z)
{
return (phi[x+1][y][z] - phi[x-1][y][z]) / (2.0*dx);
}
/* Second-order central difference d(phi)/dy at grid point (x,y,z).
 * No bounds check: y must be an interior index. dx and dz are unused. */
double GradientY_ref(double phi[][DATAYSIZE][DATAXSIZE],
double dx, double dy, double dz, int x, int y, int z)
{
return (phi[x][y+1][z] - phi[x][y-1][z]) / (2.0*dy);
}
/* Second-order central difference d(phi)/dz at grid point (x,y,z).
 * No bounds check: z must be an interior index. dx and dy are unused. */
double GradientZ_ref(double phi[][DATAYSIZE][DATAXSIZE],
double dx, double dy, double dz, int x, int y, int z)
{
return (phi[x][y][z+1] - phi[x][y][z-1]) / (2.0*dz);
}
/* Divergence of the vector field (phix, phiy, phiz) at (x,y,z):
 * d(phix)/dx + d(phiy)/dy + d(phiz)/dz, each via central differences. */
double Divergence_ref(double phix[][DATAYSIZE][DATAXSIZE],
double phiy[][DATAYSIZE][DATAXSIZE],
double phiz[][DATAYSIZE][DATAXSIZE],
double dx, double dy, double dz, int x, int y, int z)
{
return GradientX_ref(phix,dx,dy,dz,x,y,z) +
GradientY_ref(phiy,dx,dy,dz,x,y,z) +
GradientZ_ref(phiz,dx,dy,dz,x,y,z);
}
/* Seven-point Laplacian of phi at interior point (x,y,z) via second
 * central differences along each axis.
 * NOTE(review): assumes SQ(v) == v*v (macro defined elsewhere) -- confirm. */
double Laplacian_ref(double phi[][DATAYSIZE][DATAXSIZE],
double dx, double dy, double dz, int x, int y, int z)
{
double phixx = (phi[x+1][y][z] + phi[x-1][y][z] - 2.0 * phi[x][y][z]) / SQ(dx);
double phiyy = (phi[x][y+1][z] + phi[x][y-1][z] - 2.0 * phi[x][y][z]) / SQ(dy);
double phizz = (phi[x][y][z+1] + phi[x][y][z-1] - 2.0 * phi[x][y][z]) / SQ(dz);
return phixx + phiyy + phizz;
}
/* Anisotropy factor a(n) for the interface normal derived from the
 * gradient (phix, phiy, phiz), with strength epsilon. When the gradient
 * vanishes the expression would be 0/0, so the isotropic limit
 * 1 - (5/3)*epsilon is returned instead.
 * NOTE(review): assumes SQ(v) == v*v -- confirm against the SQ macro. */
double An_ref(double phix, double phiy, double phiz, double epsilon)
{
if (phix != 0.0 || phiy != 0.0 || phiz != 0.0){
return ((1.0 - 3.0 * epsilon) * (1.0 + (((4.0 * epsilon) / (1.0-3.0*epsilon))*
((SQ(phix)*SQ(phix)+SQ(phiy)*SQ(phiy)+SQ(phiz)*SQ(phiz)) /
((SQ(phix)+SQ(phiy)+SQ(phiz))*(SQ(phix)+SQ(phiy)+SQ(phiz)))))));
}
else
{
return (1.0-((5.0/3.0)*epsilon));
}
}
/* Anisotropic interface width: W(n) = W0 * a(n), with a(n) from An_ref. */
double Wn_ref(double phix, double phiy, double phiz, double epsilon, double W0)
{
return (W0*An_ref(phix,phiy,phiz,epsilon));
}
/* Anisotropic relaxation time: tau0 scaled by SQ of a(n) from An_ref
 * (presumably tau0 * a(n)^2 -- confirm SQ squares its argument). */
double taun_ref(double phix, double phiy, double phiz, double epsilon, double tau0)
{
return tau0 * SQ(An_ref(phix,phiy,phiz,epsilon));
}
/* Anisotropy derivative term with respect to the first gradient component
 * l (m and n are the other two components); each force component calls
 * this with the arguments cyclically rotated. Returns 0 when the whole
 * gradient vanishes to avoid 0/0.
 * NOTE(review): assumes SQ(v) == v*v -- confirm against the SQ macro. */
double dFunc_ref(double l, double m, double n)
{
if (l != 0.0 || m != 0.0 || n != 0.0){
return (((l*l*l*(SQ(m)+SQ(n)))-(l*(SQ(m)*SQ(m)+SQ(n)*SQ(n)))) /
((SQ(l)+SQ(m)+SQ(n))*(SQ(l)+SQ(m)+SQ(n))));
}
else
{
return 0.0;
}
}
/* Reference (host, OpenMP) computation of the anisotropy force field
 * (Fx, Fy, Fz) from the phase field phi. Interior points get
 * F = W(n)^2 * grad(phi) + |grad(phi)|^2 * W(n) * c * dFunc term;
 * points on any outer face are zeroed.
 * tau0 is accepted but unused here (kept for a uniform signature).
 * NOTE(review): assumes SQ(v) == v*v -- confirm against the SQ macro. */
void calculateForce_ref(double phi[][DATAYSIZE][DATAXSIZE],
double Fx[][DATAYSIZE][DATAXSIZE],
double Fy[][DATAYSIZE][DATAXSIZE],
double Fz[][DATAYSIZE][DATAXSIZE],
double dx, double dy, double dz,
double epsilon, double W0, double tau0)
{
#pragma omp parallel for collapse(3)
for (int ix = 0; ix < DATAXSIZE; ix++) {
for (int iy = 0; iy < DATAYSIZE; iy++) {
for (int iz = 0; iz < DATAZSIZE; iz++) {
/* Interior test: one-cell margin on every face. */
if ((ix < (DATAXSIZE-1)) && (iy < (DATAYSIZE-1)) &&
(iz < (DATAZSIZE-1)) && (ix > (0)) &&
(iy > (0)) && (iz > (0))) {
double phix = GradientX_ref(phi,dx,dy,dz,ix,iy,iz);
double phiy = GradientY_ref(phi,dx,dy,dz,ix,iy,iz);
double phiz = GradientZ_ref(phi,dx,dy,dz,ix,iy,iz);
double sqGphi = SQ(phix) + SQ(phiy) + SQ(phiz);
double c = 16.0 * W0 * epsilon;
double w = Wn_ref(phix,phiy,phiz,epsilon,W0);
double w2 = SQ(w);
/* Note the cyclic rotation of dFunc_ref's arguments per component. */
Fx[ix][iy][iz] = w2 * phix + sqGphi * w * c * dFunc_ref(phix,phiy,phiz);
Fy[ix][iy][iz] = w2 * phiy + sqGphi * w * c * dFunc_ref(phiy,phiz,phix);
Fz[ix][iy][iz] = w2 * phiz + sqGphi * w * c * dFunc_ref(phiz,phix,phiy);
}
else
{
Fx[ix][iy][iz] = 0.0;
Fy[ix][iy][iz] = 0.0;
Fz[ix][iy][iz] = 0.0;
}
}
}
}
}
/* One explicit Euler step of the anisotropic Allen-Cahn (phase-field)
 * equation on interior points:
 *   phinew = phiold + (dt / tau(n)) * (div F - dF/dphi). */
void allenCahn_ref(double phinew[][DATAYSIZE][DATAXSIZE],
                   double phiold[][DATAYSIZE][DATAXSIZE],
                   double uold[][DATAYSIZE][DATAXSIZE],
                   double Fx[][DATAYSIZE][DATAXSIZE],
                   double Fy[][DATAYSIZE][DATAXSIZE],
                   double Fz[][DATAYSIZE][DATAXSIZE],
                   double epsilon, double W0, double tau0, double lambda,
                   double dt, double dx, double dy, double dz)
{
#pragma omp parallel for collapse(3)
  for (int i = 1; i < DATAXSIZE-1; i++) {
    for (int j = 1; j < DATAYSIZE-1; j++) {
      for (int k = 1; k < DATAZSIZE-1; k++) {
        double gx = GradientX_ref(phiold, dx, dy, dz, i, j, k);
        double gy = GradientY_ref(phiold, dx, dy, dz, i, j, k);
        double gz = GradientZ_ref(phiold, dx, dy, dz, i, j, k);
        double rate = dt / taun_ref(gx, gy, gz, epsilon, tau0);
        double rhs = Divergence_ref(Fx, Fy, Fz, dx, dy, dz, i, j, k) -
                     dFphi_ref(phiold[i][j][k], uold[i][j][k], lambda);
        phinew[i][j][k] = phiold[i][j][k] + rate * rhs;
      }
    }
  }
}
/* Dirichlet boundary condition for phi: every face of the box is clamped
 * to -1.0 (fully liquid); interior values are left untouched. */
void boundaryConditionsPhi_ref(double phinew[][DATAYSIZE][DATAXSIZE])
{
#pragma omp parallel for collapse(3)
  for (int i = 0; i < DATAXSIZE; i++) {
    for (int j = 0; j < DATAYSIZE; j++) {
      for (int k = 0; k < DATAZSIZE; k++) {
        int on_face = (i == 0) || (i == DATAXSIZE-1) ||
                      (j == 0) || (j == DATAYSIZE-1) ||
                      (k == 0) || (k == DATAZSIZE-1);
        if (on_face) {
          phinew[i][j][k] = -1.0;
        }
      }
    }
  }
}
/* One explicit Euler step of the thermal equation on interior points:
 * du/dt = D * lap(u) + 0.5 * dphi/dt (latent-heat release). */
void thermalEquation_ref(double unew[][DATAYSIZE][DATAXSIZE],
                         double uold[][DATAYSIZE][DATAXSIZE],
                         double phinew[][DATAYSIZE][DATAXSIZE],
                         double phiold[][DATAYSIZE][DATAXSIZE],
                         double D, double dt, double dx, double dy, double dz)
{
#pragma omp parallel for collapse(3)
  for (int i = 1; i < DATAXSIZE-1; i++) {
    for (int j = 1; j < DATAYSIZE-1; j++) {
      for (int k = 1; k < DATAZSIZE-1; k++) {
        double release = 0.5 * (phinew[i][j][k] - phiold[i][j][k]);
        double diffuse = dt * D * Laplacian_ref(uold, dx, dy, dz, i, j, k);
        unew[i][j][k] = uold[i][j][k] + release + diffuse;
      }
    }
  }
}
/* Dirichlet boundary condition for the thermal field: every face of the
 * box is clamped to -delta (the imposed undercooling). */
void boundaryConditionsU_ref(double unew[][DATAYSIZE][DATAXSIZE], double delta)
{
#pragma omp parallel for collapse(3)
  for (int i = 0; i < DATAXSIZE; i++) {
    for (int j = 0; j < DATAYSIZE; j++) {
      for (int k = 0; k < DATAZSIZE; k++) {
        int on_face = (i == 0) || (i == DATAXSIZE-1) ||
                      (j == 0) || (j == DATAYSIZE-1) ||
                      (k == 0) || (k == DATAZSIZE-1);
        if (on_face) {
          unew[i][j][k] = -delta;
        }
      }
    }
  }
}
/* Exchange the contents of two grids element-wise (used to promote the
 * "new" time level to "old" between steps). */
void swapGrid_ref(double cnew[][DATAYSIZE][DATAXSIZE],
                  double cold[][DATAYSIZE][DATAXSIZE])
{
#pragma omp parallel for collapse(3)
  for (int i = 0; i < DATAXSIZE; i++) {
    for (int j = 0; j < DATAYSIZE; j++) {
      for (int k = 0; k < DATAZSIZE; k++) {
        double held = cnew[i][j][k];
        cnew[i][j][k] = cold[i][j][k];
        cold[i][j][k] = held;
      }
    }
  }
}
/* CPU (OpenMP) reference implementation of the dendritic-solidification
 * phase-field solver.  Runs num_steps coupled Allen-Cahn / thermal
 * updates on copies of phi_ref and u_ref and writes the final fields
 * back into those buffers.  vol is the total element count of each
 * flattened 3-D grid.
 * NOTE(review): malloc results are not checked, and the loop executes
 * num_steps+1 iterations (t = 0..num_steps inclusive) — presumably
 * matching the device version; confirm before changing. */
void reference(nRarray *phi_ref, nRarray *u_ref, int vol, int num_steps)
{
  /* Grid spacing and time step. */
  const double dx = 0.4;
  const double dy = 0.4;
  const double dz = 0.4;
  const double dt = 0.01;
  const double delta = 0.8;    /* boundary undercooling */
  const double epsilon = 0.07; /* anisotropy strength */
  const double W0 = 1.0;       /* interface width scale */
  const double beta0 = 0.0;
  const double D = 2.0;        /* thermal diffusivity */
  const double d0 = 0.5;       /* capillary length */
  const double a1 = 1.25 / std::sqrt(2.0);
  const double a2 = 0.64;
  const double lambda = (W0*a1)/(d0);
  const double tau0 = ((W0*W0*W0*a1*a2)/(d0*D)) + ((W0*W0*beta0)/(d0));
  /* Working copies / scratch grids (named d_* after the device version
   * they mirror, but these live on the host heap). */
  nRarray *d_phiold; // storage for result computed on device
  nRarray *d_phinew;
  nRarray *d_uold;
  nRarray *d_unew;
  nRarray *d_Fx;
  nRarray *d_Fy;
  nRarray *d_Fz;
  // allocate buffers
  d_phiold = (nRarray*) malloc (vol*sizeof(double));
  d_phinew = (nRarray*) malloc (vol*sizeof(double));
  d_uold = (nRarray*) malloc (vol*sizeof(double));
  d_unew = (nRarray*) malloc (vol*sizeof(double));
  d_Fx = (nRarray*) malloc (vol*sizeof(double));
  d_Fy = (nRarray*) malloc (vol*sizeof(double));
  d_Fz = (nRarray*) malloc (vol*sizeof(double));
  memcpy(d_phiold, phi_ref, (vol*sizeof(double)));
  memcpy(d_uold, u_ref, (vol*sizeof(double)));
  int t = 0;
  while (t <= num_steps) {
    /* One time step: force field, phase update, phi BCs, thermal
     * update, u BCs, then promote new -> old by swapping. */
    calculateForce_ref(d_phiold,d_Fx,d_Fy,d_Fz,
                       dx,dy,dz,epsilon,W0,tau0);
    allenCahn_ref(d_phinew,d_phiold,d_uold,
                  d_Fx,d_Fy,d_Fz,
                  epsilon,W0,tau0,lambda,
                  dt,dx,dy,dz);
    boundaryConditionsPhi_ref(d_phinew);
    thermalEquation_ref(d_unew,d_uold,d_phinew,d_phiold,
                        D,dt,dx,dy,dz);
    boundaryConditionsU_ref(d_unew,delta);
    swapGrid_ref(d_phinew, d_phiold);
    swapGrid_ref(d_unew, d_uold);
    t++;
  }
  /* After the final swap the results live in the "old" buffers. */
  memcpy(phi_ref, d_phiold, (vol*sizeof(double)));
  memcpy(u_ref, d_uold, (vol*sizeof(double)));
  free(d_phiold);
  free(d_phinew);
  free(d_uold);
  free(d_unew);
  free(d_Fx);
  free(d_Fy);
  free(d_Fz);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values, normalizing so
 * that result->tv_usec is non-negative.  *y is adjusted in place (as in
 * the classic glibc example).  Returns 1 if the difference is negative,
 * otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds when x has fewer usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds (> 1s) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the usec difference is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the PLUTO-tiled order-1 3D 7-point stencil benchmark.
 * argv: Nx Ny Nz [Nt].  Runs TESTS timed sweeps of the time-tiled
 * CLooG-generated loop nest and reports the best wall time.
 * NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized if fewer than
 * 3 (resp. 4) arguments are passed — all four must be supplied. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;  /* +2 adds a one-cell halo on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Two time planes A[0]/A[1] of an Nz x Ny x Nx grid, allocated row by
   * row (pointer-of-pointer layout, not contiguous). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  /* Tile sizes used by the generated loop nest below: 4x4x4x256,
   * -1 terminates the list. */
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Stencil coefficients. */
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  /* NOTE(review): the init loops start at index 1, leaving plane/row/
   * column 0 of A[0] uninitialized even though the stencil reads
   * neighbors at index-1 — presumably inherited from the original
   * benchmark; confirm before relying on numerical output. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    /* Loop iterators of the time-tiled loop nest (t1 = time tile,
     * t2..t4 = space tiles, t5 = time point, t6..t8 = space points). */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Machine-generated by PLUTO/CLooG: do not edit by hand; the bounds
     * implement a diamond/time-skewed tiling of the 7-point stencil. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-127,128)),ceild(4*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t2+Nx,256),floord(4*t3+Nx,256)),floord(Nt+Nx-4,256)),floord(2*t1+Nx+1,256)),floord(4*t1-4*t2+Nz+Nx-1,256));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),256*t4+254),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point update: new = alpha*center + beta*(sum of
                       * the six face neighbors), double-buffered on t5%2. */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
     for(j=0;j<Ny;j++){
     free(A[0][i][j]);
     free(A[1][i][j]);
     }
     free(A[0][i]);
     free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
dctz-test.c | /**
* @file dctz-zc-test.c
* @author Seung Woo Son
* @date July 2019
* @brief DCTZ test program for Z-Checker
* (C) 2019 University of Massachuetts Lowell.
See LICENSE in top-level directory.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "dctz.h"
#ifdef WITH_Z_CHECKER
#include "zc.h"
#endif
/*
 * DCTZ test driver: read a 1-4D float/double dataset, compress it under
 * the given error bound, write the compressed stream to disk, then
 * decompress it and write the reconstruction (optionally validating the
 * round trip through Z-Checker).
 *
 * Usage: dctz-test -d|-f <err bound> <var name> <srcFilePath> <dims...> [solName]
 */
int main (int argc, char * argv[])
{
  size_t r5=0,r4=0,r3=0,r2=0,r1=0;
  size_t typesize = 0;
  char oriFilePath[640], outputFilePath[640];
#ifdef WITH_Z_CHECKER
  char *solName = NULL;
#endif
  char *varName;
  double error_bound;
  void *a_r;
  /* Initialize so the cleanup path is well-defined whichever datatype
   * branch runs (fix: these were previously uninitialized pointers). */
  double *d = NULL;
  float *f = NULL;
  int datatype;
  char *a_z;
  int N, min_argc;
#ifdef WITH_Z_CHECKER
  min_argc = 7;
#else
  min_argc = 6;
#endif
  if (argc < min_argc) {
    /* fix: this guard was misspelled WITH_Z_CHECKR, so the Z-Checker
     * usage text (with the solName argument) could never be printed. */
#ifdef WITH_Z_CHECKER
    printf ("Test case: %s -d|-f [err bound] [var name] [srcFilePath] [dimension sizes...] solName \n", argv[0]);
    printf ("Example: %s -d 1E-3 sedov testdata/x86/testfloat_8_8_128.dat 8 8 128 dctz-ec(1E-3) \n", argv[0]);
#else
    printf ("Test case: %s -d|-f [err bound] [var name] [srcFilePath] [dimension sizes...] \n", argv[0]);
    printf ("Example: %s -d 1E-3 sedov testdata/x86/testfloat_8_8_128.dat 8 8 128 \n", argv[0]);
#endif
    exit (0);
  }
  error_bound = atof (argv[2]);
  varName=argv[3];
  assert (argc >= 6);
  /* Parse 1-4 dimension sizes; N is the total element count.  With
   * Z-Checker enabled, the solution name follows the last dimension. */
#ifdef WITH_Z_CHECKER
  if (argc >= 7) { /* 1D */
    r1 = N = atoi (argv[5]);
    solName = argv[6]; /* dummy when z-checker is not set */
  }
  if (argc >= 8) { /* 2D */
    r2 = atoi (argv[6]);
    N = r1*r2;
    solName = argv[7]; /* dummy when z-checker is not set */
  }
  if (argc >= 9) { /* 3D */
    r3 = atoi (argv[7]);
    N = r1*r2*r3;
    solName = argv[8]; /* dummy when z-checker is not set */
  }
  if (argc >= 10) { /* 4D */
    r4 = atoi (argv[8]);
    N = r1*r2*r3*r4;
    solName = argv[9]; /* dummy when z-checker is not set */
  }
#else
  if (argc >= 6) { /* 1D */
    r1 = N = atoi (argv[5]);
  }
  if (argc >= 7) { /* 2D */
    r2 = atoi (argv[6]);
    N = r1*r2;
  }
  if (argc >= 8) { /* 3D */
    r3 = atoi (argv[7]);
    N = r1*r2*r3;
  }
  if (argc >= 9) { /* 4D */
    r4 = atoi (argv[8]);
    N = r1*r2*r3*r4;
  }
#endif
  printf ("total number = %d\n", N);
  sprintf (oriFilePath, "%s", argv[4]);
#ifdef USE_QTABLE
  sprintf (outputFilePath, "%s.qt.%s.z", oriFilePath, argv[2]);
#else
  sprintf (outputFilePath, "%s.t.%s.z", oriFilePath, argv[2]);
#endif /* USE_QTABLE */
#ifdef WITH_Z_CHECKER
  ZC_Init ("zc.config"); /* hard coded */
#endif /* WITH_Z_CHECKER */
  size_t outSize;
#ifdef WITH_Z_CHECKER
  ZC_DataProperty* dataProperty = NULL;
  ZC_CompareData *compareResult = NULL;
#endif /* WITH_Z_CHECKER */
  FILE *fp_in = fopen (oriFilePath, "rb");
  if (fp_in == NULL) {
    perror ("Failed: ");
    printf ("File Not Found\n");
    return (1);
  }
  if (!strcmp (argv[1], "-d")) {
    /* Double-precision path. */
    typesize = sizeof(double);
    datatype = data_type_double;
    if (NULL == (d = (double *)malloc (N*typesize))) {
      fprintf (stderr, "Out of memory: a\n");
      exit (1);
    }
    if (NULL == (a_r = (double *)malloc (N*typesize))) {
      fprintf (stderr, "Out of memory: a\n");
      exit (1);
    }
    if (NULL == (a_z = (char *)malloc (N*typesize))) {
      fprintf (stderr, "Out of memory: a_z\n");
      exit (1);
    }
    if (fread (d, typesize, N, fp_in) != (size_t)N) { /* fix: was unchecked */
      fprintf (stderr, "Short read on %s\n", oriFilePath);
      exit (1);
    }
    dct_init (BLK_SZ);
#ifdef WITH_Z_CHECKER
    dataProperty = ZC_startCmpr (varName, ZC_DOUBLE, d, r5, r4, r3, r2, r1);
#endif /* WITH_Z_CHECKER */
    dctz_compress (d, N, &outSize, a_z, error_bound);
  }
  else {
    /* Single-precision path. */
    typesize = sizeof (float);
    datatype = data_type_float;
    if (NULL == (f = (float *)malloc (N*typesize))) {
      fprintf (stderr, "Out of memory: a\n");
      exit (1);
    }
    if (NULL == (a_r = (float *)malloc (N*typesize))) {
      fprintf (stderr, "Out of memory: a\n");
      exit (1);
    }
    if (NULL == (a_z = (char *)malloc (N*typesize))) {
      fprintf (stderr, "Out of memory: a_z\n");
      exit (1);
    }
    if (fread (f, typesize, N, fp_in) != (size_t)N) { /* fix: was unchecked */
      fprintf (stderr, "Short read on %s\n", oriFilePath);
      exit (1);
    }
    dct_init_f (BLK_SZ);
#ifdef WITH_Z_CHECKER
    dataProperty = ZC_startCmpr (varName, ZC_FLOAT, f, r5, r4, r3, r2, r1);
#endif /* WITH_Z_CHECKER */
    dctz_compress_float (f, N, &outSize, a_z, error_bound);
  }
  printf ("oriFilePath = %s, outputFilePath = %s, datatype = %d error = %s, dim1 = %zu dim2 = %zu dim3=%zu \n", oriFilePath, outputFilePath, datatype, argv[2], r1, r2, r3);
  printf ("outsize = %zu\n", outSize);
#ifdef WITH_Z_CHECKER
  compareResult = ZC_endCmpr (dataProperty, solName, outSize);
#endif /* WITH_Z_CHECKER */
  /* The compressed stream begins with a header carrying the scaling
   * factor applied during compression. */
  struct header h;
  memcpy (&h, a_z, sizeof(struct header));
  double SF = h.scaling_factor;
#ifdef DEBUG
  printf ("SF = %f\n", SF);
#endif /* DEBUG */
  // deapply scaling factor to the original data
  double xscale = pow (10, SF-1);
  if (SF != 1.0)
#ifdef _OPENMP
    /* fix: the old pragma was "parallel for private(i) shared(a, SF)",
     * naming a loop index and an array that do not exist at this scope
     * (the C99 index below is automatically private). */
#pragma omp parallel for
#endif
    for (int i=0; i<N; i++) {
      if (datatype == data_type_double)
        d[i] *= xscale;
      else
        f[i] *= xscale;
    }
#ifdef DEBUG
  for (int i=0; i<BLK_SZ; i++) { // show the first block
    printf ("d[%d] = %e %p\n", i, d[i], &d[i]);
    if (i%BLK_SZ == 0 && i != 0) printf ("\n");
  }
#endif
  fclose (fp_in);
  char zfile[640];
  FILE *fp_z;
  int icount;
#ifdef USE_QTABLE
  sprintf (zfile, "%s.qt.%s.z", oriFilePath, argv[2]);
#else
  sprintf (zfile, "%s.t.%s.z", oriFilePath, argv[2]);
#endif
  fp_z = fopen (zfile, "wb");
  if (fp_z == NULL) { /* fix: fopen result was not checked */
    perror ("Failed: ");
    exit (1);
  }
  icount = fwrite (a_z, outSize, 1, fp_z);
  if (icount != 1) {
    /* fix: %zu for size_t (was %lu) */
    printf ("Write qtz file failed: %zu != %d!\n", outSize, icount);
    exit (1);
  }
  fclose (fp_z);
#ifdef USE_QTABLE
  sprintf (zfile, "%s.qt.%s.z.r", oriFilePath, argv[2]);
#else
  sprintf (zfile, "%s.t.%s.z.r", oriFilePath, argv[2]);
#endif /* USE_QTABLE */
  FILE *fp_r;
  fp_r = fopen (zfile, "wb");
  if (fp_r == NULL) { /* fix: fopen result was not checked */
    perror ("Failed: ");
    exit (1);
  }
#ifdef WITH_Z_CHECKER
  ZC_startDec ();
#endif /* WITH_Z_CHECKER */
  /* Decompress into a_r and write the reconstruction alongside input. */
  if (datatype == data_type_double) {
    dctz_decompress (a_z, (double *) a_r);
#ifdef WITH_Z_CHECKER
    ZC_endDec (compareResult, (double *) a_r);
#endif /* WITH_Z_CHECKER */
    icount = fwrite ((double *)a_r, N*sizeof(double), 1, fp_r);
  }
  else {
    dctz_decompress_float (a_z, (float *) a_r);
#ifdef WITH_Z_CHECKER
    ZC_endDec (compareResult, (float *)a_r);
#endif /* WITH_Z_CHECKER */
    icount = fwrite ((float *)a_r, N*sizeof(float), 1, fp_r);
  }
  if (icount != 1) {
    printf ("Write qtz.r file failed: != %d!\n", icount);
    exit (1);
  }
  fclose (fp_r);
#ifdef WITH_Z_CHECKER
  freeDataProperty (dataProperty);
  freeCompareResult (compareResult);
#endif /* WITH_Z_CHECKER */
  free (a_z);
  free (a_r);
  free (d); /* fix: input buffers were leaked (free(NULL) is a no-op) */
  free (f);
  printf ("done\n");
#ifdef WITH_Z_CHECKER
  ZC_Finalize ();
#endif /* WITH_Z_CHECKER */
  return 0;
}
|
sha256.c | /*
============================================================================
Name : MPC_SHA256.c
Author : Sobuno
Version : 0.1
Description : MPC SHA256 for one block only
============================================================================
Modified to fit runSHA256Proof
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "shared.h"
#include "omp.h"
#include "avtc.h"
#define CH(e, f, g) ((e & f) ^ ((~e) & g))
/* Commitment CMT(k, v, r): SHA-256 over the 16-byte key, the raw bytes
 * of the view struct, and the 4-byte commitment randomness.
 * NOTE(review): hashing &v hashes any struct padding too — prover and
 * verifier must use identical layouts for this to match. */
void CMT(unsigned char k[16], V v, unsigned char r[4], unsigned char hash[SHA256_DIGEST_LENGTH]) {
    SHA256_CTX ctx;
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, k, 16);
    SHA256_Update(&ctx, &v, sizeof(v));
    SHA256_Update(&ctx, r, 4);
    SHA256_Final(hash, &ctx);
}
/* Global profiling counters (never incremented in this file —
 * presumably updated or read elsewhere; TODO confirm they are used). */
int totalRandom = 0;
int totalSha = 0;
int totalSS = 0;
int totalHash = 0;
/* Assemble a 32-bit value from four successive rand() bytes,
 * least-significant byte first (NOT cryptographically secure). */
uint32_t rand32() {
    uint32_t x = 0;
    for (int shift = 0; shift < 32; shift += 8) {
        x |= ((uint32_t)(rand() & 0xff)) << shift;
    }
    return x;
}
/* Print n in binary, most-significant bit first, with no leading zeros;
 * prints nothing at all when n == 0. */
void printbits(uint32_t n) {
    if (n == 0) {
        return;
    }
    printbits(n >> 1);     /* emit higher-order bits first */
    printf("%d", (int)(n & 1));
}
/* Share-wise XOR: z[p] = x[p] ^ y[p] for each of the three parties.
 * XOR is linear, so no randomness or view transcript is needed. */
void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) {
    for (int p = 0; p < 3; p++) {
        z[p] = x[p] ^ y[p];
    }
}
/* Shared AND gate (3-party MPC multiplication): each party combines its
 * own shares with its neighbor's, masked by fresh correlated randomness.
 * Consumes 4 bytes of randomness per party and appends one output word
 * to each party's view transcript. */
void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3],
             int *countY) {
    uint32_t r[3];
    for (int p = 0; p < 3; p++) {
        r[p] = getRandom32(randomness[p], *randCount);
    }
    *randCount += 4;
    /* Compute into a temporary first: callers may alias z with x or y
     * (e.g. mpc_CH passes the same array as y and z). */
    uint32_t t[3];
    for (int p = 0; p < 3; p++) {
        int q = (p + 1) % 3;
        t[p] = (x[p] & y[q]) ^ (x[q] & y[p]) ^ (x[p] & y[p]) ^ r[p] ^ r[q];
    }
    for (int p = 0; p < 3; p++) {
        z[p] = t[p];
        views[p].y[*countY] = t[p];
    }
    (*countY)++;
}
/* Share-wise bitwise complement: z[p] = ~x[p] for each party. */
void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) {
    for (int p = 0; p < 3; p++) {
        z[p] = ~x[p];
    }
}
/* Shared 32-bit addition z = x + y (mod 2^32) on secret shares, using a
 * masked ripple-carry adder so the carry bits stay secret-shared.
 * Consumes 4 bytes of randomness per party and records each party's
 * carry word in its view transcript. */
void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3],
             int *countY) {
    uint32_t c[3] = {0};
    /* One fresh random mask word per party for the carry computation. */
    uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount),
                     getRandom32(randomness[2], *randCount)};
    *randCount += 4;
    uint8_t a[3], b[3];
    uint8_t t;
    /* Ripple the shared carry through bit positions 0..30; the carry out
     * of bit 31 is discarded (addition is mod 2^32). */
    for (int i = 0; i < 31; i++) {
        a[0] = GETBIT(x[0] ^ c[0], i);
        a[1] = GETBIT(x[1] ^ c[1], i);
        a[2] = GETBIT(x[2] ^ c[2], i);
        b[0] = GETBIT(y[0] ^ c[0], i);
        b[1] = GETBIT(y[1] ^ c[1], i);
        b[2] = GETBIT(y[2] ^ c[2], i);
        /* Each party's next carry bit mixes its neighbor's shares with
         * the per-bit randomness (same structure as mpc_AND). */
        t = (a[0] & b[1]) ^ (a[1] & b[0]) ^ GETBIT(r[1], i);
        SETBIT(c[0], i + 1, t ^ (a[0] & b[0]) ^ GETBIT(c[0], i) ^ GETBIT(r[0], i));
        t = (a[1] & b[2]) ^ (a[2] & b[1]) ^ GETBIT(r[2], i);
        SETBIT(c[1], i + 1, t ^ (a[1] & b[1]) ^ GETBIT(c[1], i) ^ GETBIT(r[1], i));
        t = (a[2] & b[0]) ^ (a[0] & b[2]) ^ GETBIT(r[0], i);
        SETBIT(c[2], i + 1, t ^ (a[2] & b[2]) ^ GETBIT(c[2], i) ^ GETBIT(r[2], i));
    }
    /* Sum share = x ^ y ^ carry, per party. */
    z[0] = x[0] ^ y[0] ^ c[0];
    z[1] = x[1] ^ y[1] ^ c[1];
    z[2] = x[2] ^ y[2] ^ c[2];
    views[0].y[*countY] = c[0];
    views[1].y[*countY] = c[1];
    views[2].y[*countY] = c[2];
    *countY += 1;
}
/* Shared addition of a PUBLIC 32-bit constant: z = x + y (mod 2^32)
 * where y is the same known value for all parties.  Same masked
 * ripple-carry structure as mpc_ADD; consumes 4 bytes of randomness per
 * party and records the carry share in each view. */
void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3],
              int *countY) {
    uint32_t c[3] = {0};
    uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount),
                     getRandom32(randomness[2], *randCount)};
    *randCount += 4;
    uint8_t a[3], b[3];
    uint8_t t;
    /* Carry ripples through bits 0..30; carry out of bit 31 discarded. */
    for (int i = 0; i < 31; i++) {
        a[0] = GETBIT(x[0] ^ c[0], i);
        a[1] = GETBIT(x[1] ^ c[1], i);
        a[2] = GETBIT(x[2] ^ c[2], i);
        /* The constant y is public, so each party uses the same y bits. */
        b[0] = GETBIT(y ^ c[0], i);
        b[1] = GETBIT(y ^ c[1], i);
        b[2] = GETBIT(y ^ c[2], i);
        t = (a[0] & b[1]) ^ (a[1] & b[0]) ^ GETBIT(r[1], i);
        SETBIT(c[0], i + 1, t ^ (a[0] & b[0]) ^ GETBIT(c[0], i) ^ GETBIT(r[0], i));
        t = (a[1] & b[2]) ^ (a[2] & b[1]) ^ GETBIT(r[2], i);
        SETBIT(c[1], i + 1, t ^ (a[1] & b[1]) ^ GETBIT(c[1], i) ^ GETBIT(r[1], i));
        t = (a[2] & b[0]) ^ (a[0] & b[2]) ^ GETBIT(r[0], i);
        SETBIT(c[2], i + 1, t ^ (a[2] & b[2]) ^ GETBIT(c[2], i) ^ GETBIT(r[2], i));
    }
    z[0] = x[0] ^ y ^ c[0];
    z[1] = x[1] ^ y ^ c[1];
    z[2] = x[2] ^ y ^ c[2];
    views[0].y[*countY] = c[0];
    views[1].y[*countY] = c[1];
    views[2].y[*countY] = c[2];
    *countY += 1;
}
/* Plain (non-MPC) single-block SHA-256 (FIPS 180-4) of `numBits` bits of
 * `input`.  Only messages that fit in one padded 512-bit block are
 * supported (numBits <= 447).  Writes the 32-byte digest to `result`.
 * Returns 0 on success, -1 on failure (too long, or out of memory). */
int sha256(unsigned char *result, unsigned char *input, int numBits) {
    /* Initial hash values H0..H7. */
    uint32_t hA[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
                      0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
    if (numBits > 447) {
        printf("Input too long, aborting!");
        return -1;
    }
    int chars = numBits >> 3;
    unsigned char *chunk = calloc(64, 1); //512 bits
    if (chunk == NULL) { /* fix: allocation result was not checked */
        return -1;
    }
    memcpy(chunk, input, chars);
    chunk[chars] = 0x80; /* mandatory '1' padding bit */
    //Last 8 chars used for storing length of input without padding, in big-endian.
    //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest
    //chunk[60] = numBits >> 24;
    //chunk[61] = numBits >> 16;
    chunk[62] = numBits >> 8;
    chunk[63] = numBits;
    /* Message schedule w[0..63]: first 16 words come straight from the
     * block, big-endian. */
    uint32_t w[64];
    int i;
    for (i = 0; i < 16; i++) {
        w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16)
               | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3];
    }
    free(chunk); /* fix: this buffer was leaked in the original */
    /* Expand the remaining 48 schedule words. */
    uint32_t s0, s1;
    for (i = 16; i < 64; i++) {
        s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18)
             ^ (w[i - 15] >> 3);
        s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19)
             ^ (w[i - 2] >> 10);
        w[i] = w[i - 16] + s0 + w[i - 7] + s1;
    }
    /* 64 rounds of the compression function. */
    uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj;
    a = hA[0];
    b = hA[1];
    c = hA[2];
    d = hA[3];
    e = hA[4];
    f = hA[5];
    g = hA[6];
    h = hA[7];
    for (i = 0; i < 64; i++) {
        s1 = RIGHTROTATE(e, 6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25);
        temp1 = h + s1 + CH(e, f, g) + k[i] + w[i];
        s0 = RIGHTROTATE(a, 2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22);
        maj = (a & (b ^ c)) ^ (b & c); /* MAJ(a,b,c) */
        temp2 = s0 + maj;
        h = g;
        g = f;
        f = e;
        e = d + temp1;
        d = c;
        c = b;
        b = a;
        a = temp1 + temp2;
    }
    /* Add the compressed chunk into the running hash. */
    hA[0] += a;
    hA[1] += b;
    hA[2] += c;
    hA[3] += d;
    hA[4] += e;
    hA[5] += f;
    hA[6] += g;
    hA[7] += h;
    /* Serialize the digest big-endian. */
    for (i = 0; i < 8; i++) {
        result[i * 4] = (hA[i] >> 24);
        result[i * 4 + 1] = (hA[i] >> 16);
        result[i * 4 + 2] = (hA[i] >> 8);
        result[i * 4 + 3] = hA[i];
    }
    return 0;
}
/* Rotate each party's share right by i bits (linear operation, so it is
 * applied share-wise with no randomness). */
void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) {
    for (int p = 0; p < 3; p++) {
        z[p] = RIGHTROTATE(x[p], i);
    }
}
/* Logical right shift of each party's share by i bits (linear). */
void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) {
    for (int p = 0; p < 3; p++) {
        z[p] = x[p] >> i;
    }
}
/* Shared majority: MAJ(a,b,c) computed as ((a^b) & (a^c)) ^ a, which
 * needs only one (randomness-consuming) AND gate.  Appends one word to
 * each party's view via mpc_AND. */
void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int *randCount,
             View views[3], int *countY) {
    uint32_t t0[3];
    uint32_t t1[3];
    mpc_XOR(a, b, t0);
    mpc_XOR(a, c, t1);
    mpc_AND(t0, t1, z, randomness, randCount, views, countY);
    mpc_XOR(z, a, z);
}
/* Shared choice: CH(e,f,g) computed as (e & (f^g)) ^ g, using a single
 * AND gate.  Note mpc_AND is called with t0 as both input and output —
 * mpc_AND must (and does) tolerate that aliasing. */
void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int *randCount,
            View views[3], int *countY) {
    uint32_t t0[3];
    //e & (f^g) ^ g
    mpc_XOR(f, g, t0);
    mpc_AND(e, t0, t0, randomness, randCount, views, countY);
    mpc_XOR(t0, g, z);
}
/* Single-block SHA-256 evaluated as a 3-party MPC circuit: inputs[p] is
 * party p's share of the message, results[p] receives party p's share of
 * the 32-byte digest.  Non-linear gates consume per-party randomness and
 * append to views/countY so the transcript can later be opened/verified.
 * Mirrors the plain sha256() above gate for gate.
 * Returns 0 on success, -1 if the message does not fit one block. */
int mpc_sha256(unsigned char *results[3], unsigned char *inputs[3], int numBits, unsigned char *randomness[3],
               View views[3], int *countY) {
    if (numBits > 447) {
        printf("Input too long, aborting!");
        return -1;
    }
    int *randCount = calloc(1, sizeof(int));
    int chars = numBits >> 3;
    unsigned char *chunks[3];
    uint32_t w[64][3];
    /* Pad each share into its own 512-bit block and load the first 16
     * schedule words; the padded block is also stored as the view's x. */
    for (int i = 0; i < 3; i++) {
        chunks[i] = calloc(64, 1); //512 bits
        memcpy(chunks[i], inputs[i], chars);
        chunks[i][chars] = 0x80;
        //Last 8 chars used for storing length of input without padding, in big-endian.
        //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest
        //chunk[60] = numBits >> 24;
        //chunk[61] = numBits >> 16;
        chunks[i][62] = numBits >> 8;
        chunks[i][63] = numBits;
        memcpy(views[i].x, chunks[i], 64);
        for (int j = 0; j < 16; j++) {
            w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16)
                      | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3];
        }
        free(chunks[i]);
    }
    uint32_t s0[3], s1[3];
    uint32_t t0[3], t1[3];
    /* Message-schedule expansion on shares (rotates/shifts/XORs are
     * linear; the additions are MPC gates). */
    for (int j = 16; j < 64; j++) {
        //s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3);
        mpc_RIGHTROTATE(w[j - 15], 7, t0);
        mpc_RIGHTROTATE(w[j - 15], 18, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTSHIFT(w[j - 15], 3, t1);
        mpc_XOR(t0, t1, s0);
        //s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10);
        mpc_RIGHTROTATE(w[j - 2], 17, t0);
        mpc_RIGHTROTATE(w[j - 2], 19, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTSHIFT(w[j - 2], 10, t1);
        mpc_XOR(t0, t1, s1);
        //w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i];
        mpc_ADD(w[j - 16], s0, t1, randomness, randCount, views, countY);
        mpc_ADD(w[j - 7], t1, t1, randomness, randCount, views, countY);
        mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY);
    }
    /* Working variables start as public constants, replicated shares. */
    uint32_t a[3] = {hA[0], hA[0], hA[0]};
    uint32_t b[3] = {hA[1], hA[1], hA[1]};
    uint32_t c[3] = {hA[2], hA[2], hA[2]};
    uint32_t d[3] = {hA[3], hA[3], hA[3]};
    uint32_t e[3] = {hA[4], hA[4], hA[4]};
    uint32_t f[3] = {hA[5], hA[5], hA[5]};
    uint32_t g[3] = {hA[6], hA[6], hA[6]};
    uint32_t h[3] = {hA[7], hA[7], hA[7]};
    uint32_t temp1[3], temp2[3], maj[3];
    /* 64 compression rounds on shares. */
    for (int i = 0; i < 64; i++) {
        //s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25);
        mpc_RIGHTROTATE(e, 6, t0);
        mpc_RIGHTROTATE(e, 11, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTROTATE(e, 25, t1);
        mpc_XOR(t0, t1, s1);
        //ch = (e & f) ^ ((~e) & g);
        //temp1 = h + s1 + CH(e,f,g) + k[i]+w[i];
        //t0 = h + s1
        mpc_ADD(h, s1, t0, randomness, randCount, views, countY);
        mpc_CH(e, f, g, t1, randomness, randCount, views, countY);
        //t1 = t0 + t1 (h+s1+ch)
        mpc_ADD(t0, t1, t1, randomness, randCount, views, countY);
        /* k[i] is a public round constant, so ADDK is used. */
        mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY);
        mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY);
        //s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22);
        mpc_RIGHTROTATE(a, 2, t0);
        mpc_RIGHTROTATE(a, 13, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTROTATE(a, 22, t1);
        mpc_XOR(t0, t1, s0);
        mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY);
        //temp2 = s0+maj;
        mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY);
        /* Rotate the working variables down one position. */
        memcpy(h, g, sizeof(uint32_t) * 3);
        memcpy(g, f, sizeof(uint32_t) * 3);
        memcpy(f, e, sizeof(uint32_t) * 3);
        //e = d+temp1;
        mpc_ADD(d, temp1, e, randomness, randCount, views, countY);
        memcpy(d, c, sizeof(uint32_t) * 3);
        memcpy(c, b, sizeof(uint32_t) * 3);
        memcpy(b, a, sizeof(uint32_t) * 3);
        //a = temp1+temp2;
        mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY);
    }
    /* Add the compressed chunk to the (public) initial hash values. */
    uint32_t hHa[8][3] = {{hA[0], hA[0], hA[0]},
                          {hA[1], hA[1], hA[1]},
                          {hA[2], hA[2], hA[2]},
                          {hA[3], hA[3], hA[3]},
                          {hA[4], hA[4], hA[4]},
                          {hA[5], hA[5], hA[5]},
                          {hA[6], hA[6], hA[6]},
                          {hA[7], hA[7], hA[7]}};
    mpc_ADD(hHa[0], a, hHa[0], randomness, randCount, views, countY);
    mpc_ADD(hHa[1], b, hHa[1], randomness, randCount, views, countY);
    mpc_ADD(hHa[2], c, hHa[2], randomness, randCount, views, countY);
    mpc_ADD(hHa[3], d, hHa[3], randomness, randCount, views, countY);
    mpc_ADD(hHa[4], e, hHa[4], randomness, randCount, views, countY);
    mpc_ADD(hHa[5], f, hHa[5], randomness, randCount, views, countY);
    mpc_ADD(hHa[6], g, hHa[6], randomness, randCount, views, countY);
    mpc_ADD(hHa[7], h, hHa[7], randomness, randCount, views, countY);
    /* Serialize each party's digest share big-endian. */
    for (int i = 0; i < 8; i++) {
        mpc_RIGHTSHIFT(hHa[i], 24, t0);
        results[0][i * 4] = t0[0];
        results[1][i * 4] = t0[1];
        results[2][i * 4] = t0[2];
        mpc_RIGHTSHIFT(hHa[i], 16, t0);
        results[0][i * 4 + 1] = t0[0];
        results[1][i * 4 + 1] = t0[1];
        results[2][i * 4 + 1] = t0[2];
        mpc_RIGHTSHIFT(hHa[i], 8, t0);
        results[0][i * 4 + 2] = t0[0];
        results[1][i * 4 + 2] = t0[1];
        results[2][i * 4 + 2] = t0[2];
        results[0][i * 4 + 3] = hHa[i][0];
        results[1][i * 4 + 3] = hHa[i][1];
        results[2][i * 4 + 3] = hHa[i][2];
    }
    free(randCount);
    return 0;
}
/* Write numItems records of `size` bytes from data to filename in
 * binary mode.  Returns 0 on success, 1 on open/write/close failure. */
int writeToFile(char filename[], void *data, int size, int numItems) {
    FILE *file = fopen(filename, "wb");
    if (!file) {
        printf("Unable to open file!");
        return 1;
    }
    /* fix: the original ignored the fwrite result, silently dropping
     * short writes. */
    if (fwrite(data, size, numItems, file) != (size_t)numItems) {
        fclose(file);
        return 1;
    }
    /* fclose flushes buffered data; a failure here means data was lost. */
    if (fclose(file) != 0) {
        return 1;
    }
    return 0;
}
/* Run the MPC SHA-256 over the three input shares, append each party's
 * digest words to the tail of its view transcript, and collect the three
 * output vectors into an `a` (the public output part of the proof). */
a commit(int numBytes, unsigned char shares[3][numBytes], unsigned char *randomness[3], View views[3]) {
    unsigned char *inputs[3];
    inputs[0] = shares[0];
    inputs[1] = shares[1];
    inputs[2] = shares[2];
    unsigned char *hashes[3];
    hashes[0] = malloc(32);
    hashes[1] = malloc(32);
    hashes[2] = malloc(32);
    int *countY = calloc(1, sizeof(int));
    mpc_sha256(hashes, inputs, numBytes * 8, randomness, views, countY);
    //Explicitly add y to view
    /* Repack each 32-byte digest share as 8 big-endian words at the end
     * of the view's y array (so output() can read them back later). */
    for (int i = 0; i < 8; i++) {
        views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16)
                              | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3];
        views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16)
                              | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3];
        views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16)
                              | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3];
        *countY += 1;
    }
    free(countY);
    free(hashes[0]);
    free(hashes[1]);
    free(hashes[2]);
    /* Extract each party's output share from its finished view. */
    uint32_t *result1 = malloc(32);
    output(views[0], result1);
    uint32_t *result2 = malloc(32);
    output(views[1], result2);
    uint32_t *result3 = malloc(32);
    output(views[2], result3);
    a a;  /* local named like its type; C keeps these namespaces separate */
    memcpy(a.yp[0], result1, 32);
    memcpy(a.yp[1], result2, 32);
    memcpy(a.yp[2], result3, 32);
    free(result1);
    free(result2);
    free(result3);
    return a;
}
/* Open the proof for challenge e: reveal the keys, views and commitment
 * randomness of parties e and e+1 (mod 3); party e+2 stays hidden. */
Z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], V views[3]) {
    Z z;
    memcpy(z.ke, keys[e], 16);
    memcpy(z.ke1, keys[(e + 1) % 3], 16);
    z.ve = views[e];
    z.ve1 = views[(e + 1) % 3];
    memcpy(z.re, rs[e], 4);
    memcpy(z.re1, rs[(e + 1) % 3], 4);
    return z;
}
/* Build the array of opened proofs, one per iteration, from the stored
 * views.  Caller owns (frees) the returned array.
 * NOTE(review): opens party (es[i]+1)%3 rather than es[i] — confirm this
 * offset matches the verifier's challenge convention. */
Z *packSHA256A(int *es, sha256TotalViews sha256Views) {
    Z *zs = malloc(sizeof(Z) * ITERATIONS);
    for (int i = 0; i < ITERATIONS; i++) {
        zs[i] = prove((es[i] + 1) % 3, sha256Views.keys[i], sha256Views.rs[i], sha256Views.localViews[i]);
    }
    return zs;
}
/* Copy the last 8 words (256 bits) of a view's y transcript — where
 * commit() stored the digest share — into result. */
void out(V v, uint32_t *result) {
    memcpy(result, &v.y[ySize - 8], 32);
}
/* Convert a packed proof Z into the local z layout by deep-copying the
 * fixed-size fields (two views' x/y arrays, the two opened randomness
 * values and the two opened keys). */
z zFromZ(Z zz) {
    z z;  /* local named like its type; tag and object namespaces differ */
    memcpy(z.ve.x, zz.ve.x, sizeof(char) * 64);
    memcpy(z.ve.y, zz.ve.y, sizeof(uint32_t) * ySize);
    memcpy(z.ve1.x, zz.ve1.x, sizeof(char) * 64);
    memcpy(z.ve1.y, zz.ve1.y, sizeof(uint32_t) * ySize);
    memcpy(z.re, zz.re, sizeof(char) * 4);
    memcpy(z.re1, zz.re1, sizeof(char) * 4);
    memcpy(z.ke, zz.ke, sizeof(char) * 16);
    memcpy(z.ke1, zz.ke1, sizeof(char) * 16);
    return z;
}
/* Produce ITERATIONS independent ZKBoo-style proofs for the SHA-256
 * preimage shares: generates per-party keys and commitment randomness,
 * expands per-party tape randomness, runs the MPC SHA-256 in parallel,
 * and commits to each party's view with CMT.  Returns the filled
 * sha256TotalViews structure. */
sha256TotalViews runSHA256Proof(unsigned char shares[ITERATIONS][3][SECRET_LENGTH / 8]) {
    setbuf(stdout, NULL);
    srand((unsigned) time(NULL));
    init_EVP();
    openmp_thread_setup();
    /* Sanity-check the CSPRNG before drawing key material from it. */
    unsigned char garbage[4];
    if (RAND_bytes(garbage, 4) != 1) {
        printf("RAND_bytes failed crypto, aborting\n");
        exit(2);
    }
    sha256TotalViews sha256Views;
    //Generating keys
    if (RAND_bytes((unsigned char *) sha256Views.keys, ITERATIONS * 3 * 16) != 1) {
        printf("RAND_bytes failed crypto, aborting\n");
        exit(2);
    }
    //Generating commitment randomness
    /* fix: this call previously filled sha256Views.keys a second time,
     * leaving the commitment randomness rs uninitialized. */
    if (RAND_bytes((unsigned char *) sha256Views.rs, ITERATIONS * 3 * 4) != 1) {
        printf("RAND_bytes failed crypto, aborting\n");
        exit(2);
    }
    //Generating randomness
    unsigned char *randomness[ITERATIONS][3];
#pragma omp parallel for default(none) shared(randomness, sha256Views)
    for (int k = 0; k < ITERATIONS; k++) {
        for (int j = 0; j < 3; j++) {
            randomness[k][j] = malloc(2912 * sizeof(unsigned char));
            getAllRandomness(sha256Views.keys[k][j], randomness[k][j]);
        }
    }
    //Running MPC-SHA2
    #pragma omp parallel for default(none) shared(sha256Views, shares, randomness)
    for (int k = 0; k < ITERATIONS; k++) {
        a iterationViews = commit(SECRET_LENGTH / 8, shares[k], randomness[k], sha256Views.localViews[k]);
        memcpy(sha256Views.as[k].yp, &iterationViews.yp, sizeof(int) * 3 * 8);
        for (int j = 0; j < 3; j++) {
            free(randomness[k][j]);
        }
    }
    /* Commit to each party's view (sequentially; CMT is cheap). */
    for (int k = 0; k < ITERATIONS; k++) {
        unsigned char hash1[SHA256_DIGEST_LENGTH];
        /* fix: pass hash1 (decays to unsigned char*), not &hash1 which
         * has type unsigned char (*)[32] and mismatched the parameter. */
        CMT(sha256Views.keys[k][0], sha256Views.localViews[k][0], sha256Views.rs[k][0], hash1);
        memcpy(sha256Views.as[k].h[0], hash1, 32);
        CMT(sha256Views.keys[k][1], sha256Views.localViews[k][1], sha256Views.rs[k][1], hash1);
        memcpy(sha256Views.as[k].h[1], hash1, 32);
        CMT(sha256Views.keys[k][2], sha256Views.localViews[k][2], sha256Views.rs[k][2], hash1);
        memcpy(sha256Views.as[k].h[2], hash1, 32);
    }
    return sha256Views;
}
/*
 * Verify one ZKBoo iteration against commitment a and proof transcript zz
 * for challenge e: check the two opened commitments and output shares, then
 * re-derive the random tapes from the revealed seeds and replay the MPC
 * SHA-256 circuit over the two opened views.
 * Returns 0 when every check passes, 1 otherwise.
 * NOTE(review): `inputs` is currently unreferenced; kept for API
 * compatibility with callers.
 * FIX: the original leaked hash/result/randCount/countY on every failing
 * early return and never checked the allocations; all exits now funnel
 * through a single cleanup path.
 */
int verifySHA256(A a, int e, Z zz, unsigned char inputs[2][SECRET_LENGTH / 8]) {
    z z = zFromZ(zz);
    int ret = 1; /* pessimistic: any early exit reports failure */
    /* Allocate every heap buffer up front so one cleanup frees them all. */
    unsigned char *hash = malloc(SHA256_DIGEST_LENGTH);
    uint32_t *result = malloc(32);
    int *randCount = calloc(1, sizeof(int));
    int *countY = calloc(1, sizeof(int));
    if (hash == NULL || result == NULL || randCount == NULL || countY == NULL) {
        goto cleanup;
    }
    /* 1. The two opened views must match their announced commitments. */
    H(z.ke, z.ve, z.re, hash);
    if (memcmp(a.h[e], hash, 32) != 0) {
        printf("Failing at %d", __LINE__);
        goto cleanup;
    }
    H(z.ke1, z.ve1, z.re1, hash);
    if (memcmp(a.h[(e + 1) % 3], hash, 32) != 0) {
        printf("Failing at %d", __LINE__);
        goto cleanup;
    }
    /* 2. The opened views' final words must match the announced output shares. */
    output(z.ve, result);
    if (memcmp(a.yp[e], result, 32) != 0) {
        printf("Failing at %d", __LINE__);
        goto cleanup;
    }
    output(z.ve1, result);
    if (memcmp(a.yp[(e + 1) % 3], result, 32) != 0) {
        printf("Failing at %d", __LINE__);
        goto cleanup;
    }
    /* 3. Re-derive the pseudo-random tapes from the revealed seeds. */
    unsigned char randomness[2][2912];
    getAllRandomness(z.ke, randomness[0]);
    getAllRandomness(z.ke1, randomness[1]);
    /* Message schedule: load the 16 big-endian input words from each view. */
    uint32_t w[64][2];
    for (int j = 0; j < 16; j++) {
        w[j][0] = (z.ve.x[j * 4] << 24) | (z.ve.x[j * 4 + 1] << 16)
                  | (z.ve.x[j * 4 + 2] << 8) | z.ve.x[j * 4 + 3];
        w[j][1] = (z.ve1.x[j * 4] << 24) | (z.ve1.x[j * 4 + 1] << 16)
                  | (z.ve1.x[j * 4 + 2] << 8) | z.ve1.x[j * 4 + 3];
    }
    uint32_t s0[2], s1[2];
    uint32_t t0[2], t1[2];
    /* Extend the schedule, replaying each shared addition against the views. */
    for (int j = 16; j < 64; j++) {
        //s0 = RIGHTROTATE(w[j-15],7) ^ RIGHTROTATE(w[j-15],18) ^ (w[j-15] >> 3);
        mpc_RIGHTROTATE2(w[j - 15], 7, t0);
        mpc_RIGHTROTATE2(w[j - 15], 18, t1);
        mpc_XOR2(t0, t1, t0);
        mpc_RIGHTSHIFT2(w[j - 15], 3, t1);
        mpc_XOR2(t0, t1, s0);
        //s1 = RIGHTROTATE(w[j-2],17) ^ RIGHTROTATE(w[j-2],19) ^ (w[j-2] >> 10);
        mpc_RIGHTROTATE2(w[j - 2], 17, t0);
        mpc_RIGHTROTATE2(w[j - 2], 19, t1);
        mpc_XOR2(t0, t1, t0);
        mpc_RIGHTSHIFT2(w[j - 2], 10, t1);
        mpc_XOR2(t0, t1, s1);
        //w[j] = w[j-16] + s0 + w[j-7] + s1;
        if (mpc_ADD_verify(w[j - 16], s0, t1, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, j);
            goto cleanup;
        }
        if (mpc_ADD_verify(w[j - 7], t1, t1, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, j);
            goto cleanup;
        }
        if (mpc_ADD_verify(t1, s1, w[j], z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, j);
            goto cleanup;
        }
    }
    /* Working state for both opened parties, initialized to the SHA-256 IV. */
    uint32_t va[2] = { hA[0], hA[0] };
    uint32_t vb[2] = { hA[1], hA[1] };
    uint32_t vc[2] = { hA[2], hA[2] };
    uint32_t vd[2] = { hA[3], hA[3] };
    uint32_t ve[2] = { hA[4], hA[4] };
    uint32_t vf[2] = { hA[5], hA[5] };
    uint32_t vg[2] = { hA[6], hA[6] };
    uint32_t vh[2] = { hA[7], hA[7] };
    uint32_t temp1[3], temp2[3], maj[3];
    /* Replay the 64 compression rounds. */
    for (int i = 0; i < 64; i++) {
        //s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25);
        mpc_RIGHTROTATE2(ve, 6, t0);
        mpc_RIGHTROTATE2(ve, 11, t1);
        mpc_XOR2(t0, t1, t0);
        mpc_RIGHTROTATE2(ve, 25, t1);
        mpc_XOR2(t0, t1, s1);
        //temp1 = h + s1 + CH(e,f,g) + k[i] + w[i]
        if (mpc_ADD_verify(vh, s1, t0, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        if (mpc_CH_verify(ve, vf, vg, t1, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        //t1 = t0 + t1 (h + s1 + ch)
        if (mpc_ADD_verify(t0, t1, t1, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        t0[0] = k[i];
        t0[1] = k[i];
        if (mpc_ADD_verify(t1, t0, t1, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        if (mpc_ADD_verify(t1, w[i], temp1, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        //s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22);
        mpc_RIGHTROTATE2(va, 2, t0);
        mpc_RIGHTROTATE2(va, 13, t1);
        mpc_XOR2(t0, t1, t0);
        mpc_RIGHTROTATE2(va, 22, t1);
        mpc_XOR2(t0, t1, s0);
        //maj = (a & b) ^ (a & c) ^ (b & c)
        if (mpc_MAJ_verify(va, vb, vc, maj, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        //temp2 = s0 + maj;
        if (mpc_ADD_verify(s0, maj, temp2, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        /* Rotate the working state. */
        memcpy(vh, vg, sizeof(uint32_t) * 2);
        memcpy(vg, vf, sizeof(uint32_t) * 2);
        memcpy(vf, ve, sizeof(uint32_t) * 2);
        //e = d + temp1;
        if (mpc_ADD_verify(vd, temp1, ve, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
        memcpy(vd, vc, sizeof(uint32_t) * 2);
        memcpy(vc, vb, sizeof(uint32_t) * 2);
        memcpy(vb, va, sizeof(uint32_t) * 2);
        //a = temp1 + temp2;
        if (mpc_ADD_verify(temp1, temp2, va, z.ve, z.ve1, randomness, randCount, countY) == 1) {
            printf("Failing at %d, iteration %d", __LINE__, i);
            goto cleanup;
        }
    }
    /* 4. Final feed-forward: add the IV back into the working state. */
    uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] },
                           { hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } };
    if (mpc_ADD_verify(hHa[0], va, hHa[0], z.ve, z.ve1, randomness, randCount, countY) == 1) {
        printf("Failing at %d", __LINE__);
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[1], vb, hHa[1], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[2], vc, hHa[2], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[3], vd, hHa[3], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[4], ve, hHa[4], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[5], vf, hHa[5], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[6], vg, hHa[6], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    if (mpc_ADD_verify(hHa[7], vh, hHa[7], z.ve, z.ve1, randomness, randCount, countY) == 1) {
#if VERBOSE
        printf("Failing at %d", __LINE__);
#endif
        goto cleanup;
    }
    /* Every check passed. */
    ret = 0;
cleanup:
    free(hash);
    free(result);
    free(randCount);
    free(countY);
    return ret;
}
|
convolution_winograd_transform_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6, 3x3) input transform, pack4 storage (each "element" below
// is a float32x4 lane holding 4 channels).  Applies the 8x8 transform
// B^T * d * B to every input tile as two separable 1-D passes: rows into the
// scratch buffer tmp, then columns into the tiles-strided output planes.
static void conv3x3s1_winograd64_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // 8x8 tiles overlap by 2 (kernel - 1) and advance by the 6x6 output step.
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
    //
    //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
    //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
    //
    //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
    //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
    //
    //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
    //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
    //
    //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
    // };

    // Factored 1-D transform (common subexpressions reused):
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Per-thread scratch: tmp[out_row][in_row][lane] holds the tile after
        // the first (vertical) pass.
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Top-left of this tile in the input (pack4: 4 floats per pixel).
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                // Pass 1: transform each of the 8 rows of the 8x8 tile.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);
                    float32x4_t _r06 = vld1q_f32(r0 + 24);
                    float32x4_t _r07 = vld1q_f32(r0 + 28);

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);

                    r0 += w * 4; // advance one input row (pack4 stride)
                }

                // Each transform coefficient row k is written into its own
                // tiles-strided plane of img0_tm.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                // Pass 2: transform the columns of tmp and scatter the 8x8
                // transformed coefficients.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    // Jump past this group of 8 coefficient planes.
                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) output transform, pack4 storage.  Applies the 8->6
// inverse transform A^T * M * A to each transformed tile and adds the
// per-output-channel bias while writing the 6x6 spatial result.
static void conv3x3s1_winograd64_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias; // NULL when the layer has no bias

    // const float otm[6][8] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // Factored 1-D inverse transform:
    // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
    // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Per-thread scratch: tmp[out_row][in_col][lane] after the first pass.
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Each of the 64 coefficients of this tile lives in its own
                // tiles-strided plane of out0_tm.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                float* output0 = out0.row(i * 6) + (j * 6) * 4;

                // Pass 1: reduce the 8 rows of coefficients to 6 scratch rows.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    // Jump past this group of 8 coefficient planes.
                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // Pass 2: reduce the 8 columns to the 6x6 output block,
                // adding the bias on every element.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 16, _out04);

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 12, _out03);
                    vst1q_f32(output0 + 20, _out05);

                    output0 += outw * 4; // next output row (pack4 stride)
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) input transform, pack4 storage.  Applies the 6x6
// transform B^T * d * B to every input tile as two separable 1-D passes.
static void conv3x3s1_winograd42_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // 6x6 tiles overlap by 2 (kernel - 1) and advance by the 4x4 output step.
    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // Factored 1-D transform:
    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Per-thread scratch for the row-transformed tile.
        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Top-left of this tile in the input (pack4: 4 floats per pixel).
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                // Pass 1: transform each of the 6 rows of the 6x6 tile.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4; // advance one input row (pack4 stride)
                }

                // Each transform coefficient row k is written into its own
                // tiles-strided plane of img0_tm.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                // Pass 2: transform the columns of tmp and scatter the 6x6
                // transformed coefficients.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    // Jump past this group of 6 coefficient planes.
                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) output transform, pack4 storage.  Applies the 6->4
// inverse transform A^T * M * A to each transformed tile and adds the
// per-output-channel bias while writing the 4x4 spatial result.
static void conv3x3s1_winograd42_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias; // NULL when the layer has no bias

    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // Factored 1-D inverse transform:
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Per-thread scratch: tmp[out_row][in_col][lane] after the first pass.
        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Each of the 36 coefficients of this tile lives in its own
                // tiles-strided plane of out0_tm.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                float* output0 = out0.row(i * 4) + (j * 4) * 4;

                // Pass 1: reduce the 6 rows of coefficients to 4 scratch rows.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    // Jump past this group of 6 coefficient planes.
                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // Pass 2: reduce the 6 columns to the 4x4 output block,
                // adding the bias on every element.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);

                    output0 += outw * 4; // next output row (pack4 stride)
                }
            }
        }
    }
}
|
ctrtri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztrtri.c, normal z -> c, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trtri
*
* Computes the inverse of an upper or lower triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix, in the same storage format.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit.
* @retval < 0 if -i, the i-th argument had an illegal value.
* @retval > 0 if i, A(i,i) is exactly zero. The triangular
* matrix is singular and its inverse can not be computed.
*
*******************************************************************************
*
* @sa plasma_ctrtri
* @sa plasma_dtrtri
* @sa plasma_strtri
*
******************************************************************************/
int plasma_ctrtri(plasma_enum_t uplo, plasma_enum_t diag,
                  int n, plasma_complex32_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    // quick return (n >= 0 was already established above)
    if (n == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trtri(plasma, PlasmaComplexFloat, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Initialize sequence.  The return values of the init calls were
    // previously ignored, which could leak the descriptor and run the
    // async block on uninitialized handles.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        // Call the tile async function.
        plasma_omp_ctrtri(uplo, diag, A, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    return sequence.status;
}
/***************************************************************************//**
*
* @ingroup plasma_trtri
*
* Computes the inverse of a complex upper or lower triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in,out] A
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix, in the same storage format.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ctrtri
* @sa plasma_omp_ctrtri
* @sa plasma_omp_dtrtri
* @sa plasma_omp_strtri
*
******************************************************************************/
void plasma_omp_ctrtri(plasma_enum_t uplo, plasma_enum_t diag,
                       plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the error-reporting handles FIRST: plasma_request_fail()
    // dereferences both pointers, so the original ordering (NULL checks
    // last) dereferenced a NULL sequence/request on earlier error paths.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (A.n == 0)
        return;
    // Call the parallel function.
    plasma_pctrtri(uplo, diag, A, sequence, request);
}
|
SpatialConvolutionLocal.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionLocal.c"
#else
/* Validate tensor shapes for SpatialConvolutionLocal forward/backward.
 * gradOutput and bias may be NULL, in which case their checks are skipped.
 * The argument numbers passed to THArgCheck (9, 11, ...) refer to positions
 * in the user-facing THNN call, not to this helper's parameter list. */
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
    THTensor *input, THTensor *gradOutput,
    THTensor *weight, THTensor *bias,
    int kH, int kW, int dH,
    int dW, int padH, int padW,
    int64_t inputHeight, int64_t inputWidth,
    int64_t outputHeight, int64_t outputWidth) {
  THArgCheck(kW > 0 && kH > 0, 9,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
  int ndim = input->nDimension;
  /* Dimension indices of (plane, height, width); shifted by one when the
     input carries a leading batch dimension (4D). */
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }
  THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input,
                "3D or 4D input tensor expected but got: %s");
  /* weight here is the 3D view produced by view_weight_local:
     oH*oW x nOutputPlane x nInputPlane*kH*kW. */
  int64_t nInputPlane = weight->size[2] / (kH * kW);
  int64_t nOutputPlane = weight->size[1];
  if (bias != NULL) {
    THNN_CHECK_DIM_SIZE(bias, 3, 0, nOutputPlane);
    THNN_CHECK_DIM_SIZE(bias, 3, 1, outputHeight);
    THNN_CHECK_DIM_SIZE(bias, 3, 2, outputWidth);
  }
  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}
/* Return a contiguous 3D view of the local-convolution weight.
 * A 6D weight (presumably oH x oW x nOutputPlane x nInputPlane x kH x kW --
 * confirm against the Lua wrapper) is collapsed to
 * oH*oW x nOutputPlane x nInputPlane*kH*kW; a 3D weight is returned as-is
 * after being made contiguous.  Caller owns the returned tensor and must
 * free it. */
static THTensor* THNN_(view_weight_local)(THTensor *_weight)
{
  THTensor *weight = THTensor_(newContiguous)(_weight);
  THArgCheck(weight->nDimension == 3 || weight->nDimension == 6, 4,
             "weight tensor should be 3D or 6D - got %dD", weight->nDimension);
  if (weight->nDimension == 6) {
    int64_t s1 = weight->size[0] * weight->size[1];
    int64_t s2 = weight->size[2];
    int64_t s3 = weight->size[3] * weight->size[4] * weight->size[5];
    THTensor *old_weight = weight;
    /* stride of -1 lets newWithStorage3d infer contiguous strides
       (NOTE(review): confirm this convention in THTensor docs). */
    weight = THTensor_(newWithStorage3d)(weight->storage,
                                         weight->storageOffset,
                                         s1, -1, s2, -1, s3, -1);
    THTensor_(free)(old_weight);
  }
  return weight;
}
/* Forward pass for a single (non-batched) frame.
 * 1. unfold the input into finput (im2col layout),
 * 2. initialize output with the per-location bias,
 * 3. accumulate one small matrix product per output location via a single
 *    batched matmul: output3d += weight bmm finput3d. */
static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
     (
      THTensor *input, THTensor *output,
      THTensor *weight, THTensor *bias, THTensor *finput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  int64_t i;
  THTensor *output3d, *finput3d;
  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
                       nInputPlane, inputWidth, inputHeight,
                       outputWidth, outputHeight);
  /* output starts as the bias; the batched matmul below adds onto it. */
  THTensor_(copy)(output, bias);
  /* Zero-copy 3D views over the existing storage: the batch dimension is
     the output location (oH*oW), with stride 1 so consecutive batch
     entries interleave across the spatial plane. */
  output3d = THTensor_(newWithStorage3d)
    (output->storage, output->storageOffset,
     outputHeight * outputWidth, 1,
     nOutputPlane, outputHeight * outputWidth,
     1, nOutputPlane * outputHeight * outputWidth);
  finput3d = THTensor_(newWithStorage3d)
    (finput->storage, finput->storageOffset,
     outputHeight * outputWidth, 1,
     kW * kH * nInputPlane, outputHeight * outputWidth,
     1, kW * kH * nInputPlane * outputHeight * outputWidth);
  // weight:    oH*oW x nOutputPlane x nInputPlane*kH*kW
  // finput3d:  oH*oW x nInputPlane*kH*kW x 1
  THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
  // output3d:  oH*oW x nOutputPlane x 1
  /* Free only the view wrappers; the underlying storage is shared. */
  THTensor_(free)(output3d);
  THTensor_(free)(finput3d);
}
/* Forward pass of locally-connected 2D convolution (un-shared weights).
 * Accepts a 3D (single frame) or 4D (batched) input; batched frames are
 * processed independently in parallel with OpenMP. */
void THNN_(SpatialConvolutionLocal_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THTensor *weight,
    THTensor *bias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  /* Local 3D view of the weight; must be freed before returning. */
  weight = THNN_(view_weight_local)(weight);
  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);
  input = THTensor_(newContiguous)(input);
  int64_t nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
  int64_t nOutputPlane = THTensor_(size)(weight, 1);
  if(input->nDimension == 3)
  {
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
    THNN_(SpatialConvolutionLocal_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;
    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);
    /* Frames are independent (each writes its own output/finput slice),
       so the batch loop parallelizes safely. */
#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
      THNN_(SpatialConvolutionLocal_updateOutput_frame)
        (input_t, output_t, weight, bias, finput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(weight);
}
/* Backward (input-gradient) pass for a single frame.
 * fgradInput = weight bmm gradOutput per output location (beta=0 overwrite),
 * then the unfolded columns are accumulated back into gradInput (col2im).
 * The weight passed here is the transposed view built by the caller. */
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
     (THTensor *gradInput, THTensor *gradOutput,
      THTensor *weight, THTensor *fgradInput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *fgradInput3d;
  /* Zero-copy 3D views batched over the oH*oW output locations. */
  gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             nOutputPlane, outputHeight*outputWidth,
                                             1, nOutputPlane*outputHeight*outputWidth);
  fgradInput3d = THTensor_(newWithStorage3d)(fgradInput->storage, fgradInput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             kW*kH*nInputPlane, outputHeight*outputWidth,
                                             1, kW*kH*nInputPlane*outputHeight*outputWidth);
  // weight:        oH*oW x nInputPlane*kH*kW x nOutputPlane
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  /* beta = 0.0: fgradInput is overwritten, not accumulated. */
  THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
  // fgradInput3d:  oH*oW x nInputPlane*kH*kW x 1
  THTensor_(free)(gradOutput3d);
  THTensor_(free)(fgradInput3d);
  /* col2im: overlapping unfolded columns sum into gradInput, which must
     start from zero. */
  THTensor_(zero)(gradInput);
  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
                      nInputPlane, inputWidth, inputHeight,
                      outputWidth, outputHeight);
}
/* Backward (input-gradient) pass of locally-connected 2D convolution.
 * Builds a transposed view of the weight once (shared read-only across
 * frames) and processes batched frames in parallel. */
void THNN_(SpatialConvolutionLocal_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THTensor *weight,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  weight = THNN_(view_weight_local)(weight);
  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  int64_t nInputPlane = THTensor_(size)(weight,2)/(kW*kH);
  int64_t nOutputPlane = THTensor_(size)(weight,1);
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  /* Swap dims 1 and 2: oH*oW x nInputPlane*kH*kW x nOutputPlane,
     the layout the per-frame baddbmm expects. */
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 1, 2);
  if(input->nDimension == 3)
  {
    THNN_(SpatialConvolutionLocal_updateGradInput_frame)
      (gradInput, gradOutput, tweight,
       fgradInput, kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;
    /* Each frame writes disjoint gradInput/fgradInput slices. */
#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
      THNN_(SpatialConvolutionLocal_updateGradInput_frame)
        (gradInput_t, gradOutput_t, tweight, fgradInput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }
  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}
/* Parameter-gradient accumulation for a single frame:
 * gradWeight += scale * gradOutput bmm finput^T (per output location), and
 * gradBias   += scale * gradOutput.
 * finput must already hold the unfolded input from the forward pass. */
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
     (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
      THTensor *finput, real scale,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *finput3d;
  gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             nOutputPlane, outputHeight*outputWidth,
                                             1, nOutputPlane*outputHeight*outputWidth);
  /* Strides arranged so dim 1 has size 1: finput appears transposed,
     giving the row-vector operand of the outer product below. */
  finput3d = THTensor_(newWithStorage3d)(finput->storage, finput->storageOffset,
                                         outputHeight*outputWidth, 1,
                                         1, kW*kH*nInputPlane*outputHeight*outputWidth,
                                         kW*kH*nInputPlane, outputHeight*outputWidth);
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  // finput3d:      oH*oW x 1 x kW*kH*nInputPlane
  THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
  // gradWeight:    oH*oW x nOutputPlane x kW*kH*nInputPlane
  THTensor_(cadd)(gradBias, gradBias, scale, gradOutput);
  THTensor_(free)(gradOutput3d);
  THTensor_(free)(finput3d);
}
/* Accumulate weight/bias gradients, scaled by scale_, over one frame or a
 * whole batch.  The batch loop is intentionally serial: every frame
 * accumulates into the shared gradWeight/gradBias, so parallelizing it
 * would race. */
void THNN_(SpatialConvolutionLocal_accGradParameters)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradWeight,
    THTensor *gradBias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight,
    accreal scale_)
{
  THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  gradWeight = THNN_(view_weight_local)(gradWeight);
  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  int64_t nInputPlane = THTensor_(size)(gradWeight,2)/(kW*kH);
  int64_t nOutputPlane = THTensor_(size)(gradWeight,1);
  if(input->nDimension == 3)
  {
    THNN_(SpatialConvolutionLocal_accGradParameters_frame)
      (gradOutput, gradWeight, gradBias, finput, scale,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;
    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
      THNN_(SpatialConvolutionLocal_accGradParameters_frame)
        (gradOutput_t, gradWeight, gradBias, finput_t, scale,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(gradWeight);
}
#endif
|
jacobi-block-task-dep.c | # include "poisson.h"
/* #pragma omp task/taskwait version of SWEEP. */
/* Blocked Jacobi sweep over an nx-by-ny grid using OpenMP tasks with
 * depend() clauses.  One task is created per block_size x block_size tile
 * per iteration; the address of each tile's first element serves as the
 * dependency token, so neighbor reads (in:) are ordered against the
 * previous iteration's writes (inout:).
 * NOTE(review): max_blocks_* use integer division, so this assumes nx and
 * ny are multiples of block_size -- confirm with callers. */
void sweep (int nx, int ny, double dx, double dy, double *f_,
            int itold, int itnew, double *u_, double *unew_, int block_size)
{
#ifdef _OPENMP
    /* 2D casts exist only to name array elements inside the depend()
       clauses and the tile body; guarded because they are unused when
       OpenMP is disabled. */
    double (*f)[nx][ny] = (double (*)[nx][ny])f_;
    double (*u)[nx][ny] = (double (*)[nx][ny])u_;
    double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;
#endif
    if (block_size == 0)
        block_size = nx;
    int max_blocks_x = (nx / block_size);
    int max_blocks_y = (ny / block_size);
    /* Single producer thread creates the task graph; the team executes it. */
    #pragma omp parallel
    #pragma omp single
    {
        int it;
        int block_x, block_y;
        for (it = itold + 1; it <= itnew; it++) {
            /*
            for (block_x = 0; block_x < max_blocks_x; block_x++) {
                for (block_y = 0; block_y < max_blocks_y; block_y++) {
                    #pragma omp task shared(u_, unew_, block_size, nx, ny) firstprivate(block_x, block_y) \
                        depend(in: unew[block_x * block_size: block_size][block_y * block_size: block_size]) \
                        depend(out: u[block_x * block_size: block_size][block_y * block_size: block_size])
                    copy_block(nx, ny, block_x, block_y, u_, unew_, block_size);
                }
            }
            */
            // Compute a new estimate.
            for (block_x = 0; block_x < max_blocks_x; block_x++) {
                for (block_y = 0; block_y < max_blocks_y; block_y++) {
                    /* Offsets to neighbor tiles, clamped to 0 at the domain
                       edges so boundary tiles do not depend on tiles that
                       don't exist. */
                    int xdm1 = block_x == 0 ? 0 : 1;
                    int xdp1 = block_x == max_blocks_x-1 ? 0 : +1;
                    int ydp1 = block_y == max_blocks_y-1 ? 0 : +1;
                    int ydm1 = block_y == 0 ? 0 : 1;
                    #pragma omp task shared(u_, unew_, f_, dx, dy, nx, ny, block_size) \
                        firstprivate(block_x, block_y, xdm1, xdp1, ydp1, ydm1) \
                        depend(inout: unew[ block_x         * block_size][ block_y         * block_size],  \
                                      u[    block_x         * block_size][ block_y         * block_size])  \
                        depend(in   : f[    block_x         * block_size][ block_y         * block_size],  \
                                      u[   (block_x - xdm1) * block_size][ block_y         * block_size],  \
                                      u[    block_x         * block_size][(block_y + ydp1) * block_size],  \
                                      u[    block_x         * block_size][(block_y - ydm1) * block_size],  \
                                      u[   (block_x + xdp1) * block_size][ block_y         * block_size])
                    { //compute_estimate(block_x, block_y, u_, unew_, f_, dx, dy, nx, ny, block_size);
                        /* Promote last iteration's unew into u for this
                           tile, then recompute the tile into unew. */
                        copy_block(nx, ny, block_x, block_y, u_, unew_, block_size);
                        int i, j;
                        int start_i = block_x * block_size;
                        int start_j = block_y * block_size;
                        for (i = start_i; i < start_i + block_size; i++) {
                            for (j = start_j; j < start_j + block_size; j++) {
                                if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) {
                                    /* Dirichlet boundary: held at f. */
                                    (*unew)[i][j] = (*f)[i][j];
                                } else {
                                    /* 5-point Jacobi stencil update. */
                                    (*unew)[i][j] = 0.25 * ( (*u)[i-1][j] + (*u)[i][j+1]
                                                           + (*u)[i][j-1] + (*u)[i+1][j]
                                                           + (*f)[i][j] * dx * dy );
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
|
TestOpenMp.h | #include <HostDeviceTest.h>
#include <template_tensors/TemplateTensors.h>
#include <atomic>
// Fills a buffer with 0..99, increments every element through
// openmp::ForEach::for_each, and verifies each slot was bumped exactly once.
BOOST_AUTO_TEST_CASE(openmp_for_each)
{
  constexpr size_t kCount = 100;
  size_t values[kCount];
  for (size_t idx = 0; idx < kCount; ++idx)
  {
    values[idx] = idx;
  }
  openmp::ForEach::for_each(std::begin(values), std::end(values), [](size_t& v){v += 1;});
  for (size_t idx = 0; idx < kCount; ++idx)
  {
    BOOST_CHECK(values[idx] == idx + 1);
  }
}
// Sanity check that OpenMP is actually enabled: a 4-thread parallel region
// should bump the counter once per thread.
// NOTE(review): num_threads(4) is a request, not a guarantee -- under thread
// limits (e.g. OMP_THREAD_LIMIT) the runtime may provide fewer threads and
// this check would fail; confirm that is the intended "OpenMP misconfigured"
// signal.
BOOST_AUTO_TEST_CASE(openmp_enabled)
{
  // atomic: concurrently incremented by all threads in the region.
  std::atomic<size_t> i;
  i = 0;
  #pragma omp parallel num_threads(4)
  {
    i += 1;
  }
  BOOST_CHECK(i == 4);
}
gemv_c_csc_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <memory.h>
/* y := beta*y + alpha * conj(A)^T * x for a CSC matrix A.
 * Because A is stored by column, row i of conj(A)^T is exactly column i of
 * A conjugated, so each y[i] is an independent dot product and columns can
 * be partitioned across threads (balanced by nnz).  The inner product is
 * 4-way unrolled with separate accumulators. */
static alphasparse_status_t
gemv_csc_trans_omp_1(const ALPHA_Number alpha,
                     const ALPHA_SPMAT_CSC* A,
                     const ALPHA_Number* x,
                     const ALPHA_Number beta,
                     ALPHA_Number* y)
{
    ALPHA_INT m = A->rows;
    ALPHA_INT n = A->cols;
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* VLA on the stack; num_threads is small (thread count). */
    ALPHA_INT partition[num_threads + 1];
    balanced_partition_row_by_nnz(A->cols_end, n, num_threads, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        /* Each thread owns the column range [partition[tid], partition[tid+1]). */
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT local_m_s = partition[tid];
        ALPHA_INT local_m_e = partition[tid + 1];
        for (ALPHA_INT i = local_m_s; i < local_m_e; i++)
        {
            /* Four partial sums to break the dependency chain. */
            register ALPHA_Number tmp0;
            register ALPHA_Number tmp1;
            register ALPHA_Number tmp2;
            register ALPHA_Number tmp3;
            alpha_setzero(tmp0);
            alpha_setzero(tmp1);
            alpha_setzero(tmp2);
            alpha_setzero(tmp3);
            ALPHA_INT pks = A->cols_start[i];
            ALPHA_INT pke = A->cols_end[i];
            ALPHA_INT pkl = pke - pks;
            ALPHA_INT pkl4 = pkl - 4;
            ALPHA_INT col_ind0, col_ind1, col_ind2, col_ind3;
            ALPHA_Number *A_val = &A->values[pks];
            ALPHA_INT *A_col = &A->row_indx[pks];
            ALPHA_INT pi;
            /* Unrolled body: tmpK += conj(A_val[pi+K]) * x[row[pi+K]]. */
            for (pi = 0; pi < pkl4; pi += 4)
            {
                register ALPHA_Number c0;
                register ALPHA_Number c1;
                register ALPHA_Number c2;
                register ALPHA_Number c3;
                col_ind0 = A_col[pi];
                col_ind1 = A_col[pi + 1];
                col_ind2 = A_col[pi + 2];
                col_ind3 = A_col[pi + 3];
                alpha_conj(c0, A_val[pi]);
                alpha_conj(c1, A_val[pi + 1]);
                alpha_conj(c2, A_val[pi + 2]);
                alpha_conj(c3, A_val[pi + 3]);
                alpha_madde(tmp0, c0, x[col_ind0]);
                alpha_madde(tmp1, c1, x[col_ind1]);
                alpha_madde(tmp2, c2, x[col_ind2]);
                alpha_madde(tmp3, c3, x[col_ind3]);
            }
            /* Remainder loop for nnz not divisible by 4. */
            for (; pi < pkl; pi += 1)
            {
                register ALPHA_Number c0;
                alpha_conj(c0, A_val[pi]);
                alpha_madde(tmp0, c0, x[A_col[pi]]);
            }
            alpha_add(tmp0, tmp0, tmp1);
            alpha_add(tmp2, tmp2, tmp3);
            alpha_add(tmp0, tmp0, tmp2);
            /* y[i] = beta*y[i] + alpha*dot */
            alpha_mul(y[i], beta, y[i]);
            alpha_madde(y[i], alpha, tmp0);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name generated by the ONAME macro): forwards the
 * conjugate-transpose CSC gemv to the OpenMP implementation above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return gemv_csc_trans_omp_1(alpha, A, x, beta, y);
}
|
ecn2_opt.c | /*
* MIRACL E(F_p^2) support functions
* mrecn2.c
*
*/
#include <stdlib.h>
#include "miracl.h"
#ifdef MR_STATIC
#include <string.h>
#endif
/* Halve both components of w modulo p, in place.  Lengths are pinned to 2
 * words: this optimized build assumes a fixed 2-word representation. */
static inline void zzn2_div2_i(zzn2 *w)
{
    moddiv2(w->a->w);
    w->a->len=2;
    moddiv2(w->b->w);
    w->b->len=2;
}
/* Double both components of w modulo p, in place (w *= 2). */
static inline void zzn2_tim2_i(zzn2 *w)
{
#ifdef MR_COUNT_OPS
    fpa+=2;
#endif
    modtim2(w->a->w);
    modtim2(w->b->w);
    w->a->len=2;
    w->b->len=2;
}
/* Triple both components of w modulo p, in place (w *= 3). */
static inline void zzn2_tim3_i(zzn2 *w)
{
#ifdef MR_COUNT_OPS
    fpa+=4;
#endif
    modtim3(w->a->w);
    modtim3(w->b->w);
    w->a->len=2;
    w->b->len=2;
}
/* w := x, copying the fixed 2-word components directly (no-op if aliased). */
static inline void zzn2_copy_i(zzn2 *x,zzn2 *w)
{
    if (x==w) return;
    w->a->len=x->a->len;
    w->a->w[0]=x->a->w[0];
    w->a->w[1]=x->a->w[1];
    w->b->len=x->b->len;
    w->b->w[0]=x->b->w[0];
    w->b->w[1]=x->b->w[1];
}
/* w := x + y, componentwise modular addition. */
static inline void zzn2_add_i(zzn2 *x,zzn2 *y,zzn2 *w)
{
#ifdef MR_COUNT_OPS
    fpa+=2;
#endif
    modadd(x->a->w,y->a->w,w->a->w);
    modadd(x->b->w,y->b->w,w->b->w);
    w->a->len=2;
    w->b->len=2;
}
/* w := x - y, componentwise modular subtraction. */
static inline void zzn2_sub_i(zzn2 *x,zzn2 *y,zzn2 *w)
{
#ifdef MR_COUNT_OPS
    fpa+=2;
#endif
    modsub(x->a->w,y->a->w,w->a->w);
    modsub(x->b->w,y->b->w,w->b->w);
    w->a->len=2;
    w->b->len=2;
}
/* u := i*u in F_p^2 with i^2 = -1 (QNR): (a+ib)*i = -b + ia.
 * The new real part is -b, the new imaginary part is the old a. */
static inline void zzn2_timesi_i(zzn2 *u)
{
    mr_small w1[2];
    w1[0]=u->a->w[0];
    w1[1]=u->a->w[1];
    u->a->w[0]=u->b->w[0];
    u->a->w[1]=u->b->w[1];
    modneg(u->a->w);
    u->b->w[0]=w1[0];
    u->b->w[1]=w1[1];
}
static inline void zzn2_txx_i(zzn2 *u)
{
    /* multiply w by t^2 where x^2-t is irreducible polynomial for ZZn4
      for p=5 mod 8 t=sqrt(sqrt(-2)), qnr=-2
      for p=3 mod 8 t=sqrt(1+sqrt(-1)), qnr=-1
      for p=7 mod 8 and p=2,3 mod 5 t=sqrt(2+sqrt(-1)), qnr=-1 */
    /* Implemented as u := i*u + u + u, i.e. multiplication by (2+i),
       using a stack-allocated temporary zzn2 for the original value. */
    zzn2 t;
    struct bigtype aa,bb;
    big a,b;
    mr_small w3[2],w4[2];
    a=&aa;
    b=&bb;
    a->len=2;
    b->len=2;
    a->w=w3;
    b->w=w4;
    t.a=a;
    t.b=b;
    zzn2_copy_i(u,&t);
    zzn2_timesi_i(u);
    zzn2_add_i(u,&t,u);
    zzn2_add_i(u,&t,u);
    u->a->len=2;
    u->b->len=2;
}
/* x := i*x for a small integer i, componentwise, in place. */
static inline void zzn2_pmul_i(int i,zzn2 *x)
{
    modpmul(i,x->a->w);
    modpmul(i,x->b->w);
}
/* w := x^2 using the complex-squaring identity:
 * (a+ib)^2 = (a+b)(a-b) + i*2ab  -- 2 multiplications instead of 3.
 * NOTE(review): the static scratch buffers make this non-reentrant and
 * thread-unsafe; confirm single-threaded use (cf. MR_OS_THREADS). */
static inline void zzn2_sqr_i(zzn2 *x,zzn2 *w)
{
    static mr_small w1[2],w2[2];
#ifdef MR_COUNT_OPS
    fpa+=3;
    fpc+=2;
#endif
    modadd(x->a->w,x->b->w,w1);
    modsub(x->a->w,x->b->w,w2);
    modmult(x->a->w,x->b->w,w->b->w);
    modmult(w1,w2,w->a->w); // routine that calculates (a+b)(a-b) ??
    modtim2(w->b->w);
    w->a->len=2;
    w->b->len=2;
}
/* w := w - x - y, componentwise (moddblsub subtracts both in one pass). */
static inline void zzn2_dblsub_i(zzn2 *x,zzn2 *y,zzn2 *w)
{
#ifdef MR_COUNT_OPS
    fpa+=4;
#endif
    moddblsub(w->a->w,x->a->w,y->a->w);
    moddblsub(w->b->w,x->b->w,y->b->w);
    w->a->len=2;
    w->b->len=2;
}
/* w := x*y via Karatsuba-style complex multiplication (3 mults):
 *   re = x.a*y.a - x.b*y.b
 *   im = (x.a+x.b)(y.a+y.b) - x.a*y.a - x.b*y.b
 * NOTE(review): static scratch buffers => non-reentrant / thread-unsafe. */
static inline void zzn2_mul_i(zzn2 *x,zzn2 *y,zzn2 *w)
{
    static mr_small w1[2],w2[2],w5[2];
#ifdef MR_COUNT_OPS
    fpa+=5;
    fpc+=3;
#endif
    /*#pragma omp parallel sections
    {
    #pragma omp section */
    modmult(x->a->w,y->a->w,w1);
    /* #pragma omp section */
    modmult(x->b->w,y->b->w,w2);
    /*}*/
    modadd(x->a->w,x->b->w,w5);
    modadd(y->a->w,y->b->w,w->b->w);
    modmult(w->b->w,w5,w->b->w);
    moddblsub(w->b->w,w1,w2); /* w->b->w - w1 -w2 */
    modsub(w1,w2,w->a->w);
    w->a->len=2;
    w->b->len=2;
}
/* w := 1/w in F_p^2, in place, via the norm:
 * 1/(a+ib) = (a-ib)/(a^2+b^2).  The scalar inverse of the norm is obtained
 * with xgcd, then applied to a and to the negated b. */
void zzn2_inv_i(_MIPD_ zzn2 *w)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
#ifdef MR_COUNT_OPS
    fpc+=4;
    fpa+=1;
#endif
    MR_IN(163)
    /* w1 = a^2 + b^2 (the norm of w) */
    modsqr(w->a->w,mr_mip->w1->w);
    modsqr(w->b->w,mr_mip->w2->w);
    modadd(mr_mip->w1->w,mr_mip->w2->w,mr_mip->w1->w);
    mr_mip->w1->len=2;
    /*  redc(_MIPP_ mr_mip->w1,mr_mip->w6);  */
    copy(mr_mip->w1,mr_mip->w6);
    /* w6 = (a^2+b^2)^-1 mod p */
    xgcd(_MIPP_ mr_mip->w6,mr_mip->modulus,mr_mip->w6,mr_mip->w6,mr_mip->w6);
    /*  nres(_MIPP_ mr_mip->w6,mr_mip->w6); */
    modmult(w->a->w,mr_mip->w6->w,w->a->w);
    modneg(mr_mip->w6->w);
    modmult(w->b->w,mr_mip->w6->w,w->b->w);
    MR_OUT
}
BOOL nres_sqroot(_MIPD_ big x,big w)
{ /* w=sqrt(x) mod p. This depends on p being prime! */
  /* Computes w = x^(2^125) by 125 repeated squarings (25 iterations of 5),
     then verifies w^2 == x; returns FALSE (and zeroes w) if x was not a
     quadratic residue.  NOTE(review): the fixed exponent 2^125 is only a
     square-root map for the specific modulus this build targets -- confirm
     against the curve parameters. */
    int i,t,js;
#ifdef MR_COUNT_OPS
    fpc+=125;
#endif
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return FALSE;
    copy(x,w);
    if (size(w)==0) return TRUE;
    copy(w,mr_mip->w1);
    for (i=0;i<25;i++)
    {
        modsqr(w->w,w->w);
        modsqr(w->w,w->w);
        modsqr(w->w,w->w);
        modsqr(w->w,w->w);
        modsqr(w->w,w->w);
    }
    w->len=2;
    /* verify: w^2 must reproduce the original x */
    modsqr(w->w,mr_mip->w2->w);
    mr_mip->w2->len=2;
    if (mr_compare(mr_mip->w1,mr_mip->w2)!=0) {zero(w);return FALSE;}
    return TRUE;
}
BOOL zzn2_sqrt(_MIPD_ zzn2 *u,zzn2 *w)
{ /* sqrt(a+ib) = sqrt(a+sqrt(a*a-n*b*b)/2)+ib/(2*sqrt(a+sqrt(a*a-n*b*b)/2))
     where i*i=n */
  /* Returns FALSE (with w zeroed) when u has no square root.  If the first
     candidate (a+s)/2 is a non-residue, the second candidate (a-s)/2 is
     tried before giving up. */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_COUNT_OPS
    fpc+=2;
    fpa+=1;
#endif
    if (mr_mip->ERNUM) return FALSE;
    zzn2_copy(u,w);
    if (zzn2_iszero(w)) return TRUE;
    MR_IN(204)
    /* w7 = a^2 + b^2 (here n = -1, so a^2 - n*b^2 = a^2 + b^2) */
    modsqr(w->b->w,mr_mip->w7->w);
    modsqr(w->a->w,mr_mip->w1->w);
    modadd(mr_mip->w1->w,mr_mip->w7->w,mr_mip->w7->w);
    mr_mip->w7->len=2;
//  nres_modmult(_MIPP_ w->b,w->b,mr_mip->w7);
//  nres_modmult(_MIPP_ w->a,w->a,mr_mip->w1);
//  nres_modadd(_MIPP_ mr_mip->w7,mr_mip->w1,mr_mip->w7);
    if (!nres_sqroot(_MIPP_ mr_mip->w7,mr_mip->w7)) /* s=w7  */
    {
        zzn2_zero(w);
        MR_OUT
        return FALSE;
    }
#ifdef MR_COUNT_OPS
    fpa+=1;
#endif
    /* w15 = (a+s)/2; its square root (if it exists) is the real part */
    modadd(w->a->w,mr_mip->w7->w,mr_mip->w15->w);
    moddiv2(mr_mip->w15->w);
    mr_mip->w15->len=2;
//  nres_modadd(_MIPP_ w->a,mr_mip->w7,mr_mip->w15);
//  nres_div2(_MIPP_ mr_mip->w15,mr_mip->w15);
    if (!nres_sqroot(_MIPP_ mr_mip->w15,mr_mip->w15))
    {
#ifdef MR_COUNT_OPS
        fpa+=1;
#endif
        /* fall back to w15 = (a-s)/2 */
        modsub(w->a->w,mr_mip->w7->w,mr_mip->w15->w);
        moddiv2(mr_mip->w15->w);
        mr_mip->w15->len=2;
//      nres_modsub(_MIPP_ w->a,mr_mip->w7,mr_mip->w15);
//      nres_div2(_MIPP_ mr_mip->w15,mr_mip->w15);
        if (!nres_sqroot(_MIPP_ mr_mip->w15,mr_mip->w15))
        {
            zzn2_zero(w);
            MR_OUT
            return FALSE;
        }
//      else printf("BBBBBBBBBBBBBBBBBB\n");
    }
//  else printf("AAAAAAAAAAAAAAAAAAA\n");
#ifdef MR_COUNT_OPS
    fpa+=1;
#endif
    /* real part = sqrt(candidate); imaginary part = b / (2*real) */
    copy(mr_mip->w15,w->a);
    modadd(mr_mip->w15->w,mr_mip->w15->w,mr_mip->w15->w);
    nres_moddiv(_MIPP_ w->b,mr_mip->w15,w->b);
    MR_OUT
    return TRUE;
}
/*
BOOL zzn2_multi_inverse(_MIPD_ int m,zzn2 *x,zzn2 *w)
{
int i;
zzn2 t1,t2;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (m==0) return TRUE;
if (m<0) return FALSE;
MR_IN(214)
if (x==w)
{
mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
MR_OUT
return FALSE;
}
if (m==1)
{
zzn2_copy_i(&x[0],&w[0]);
zzn2_inv_i(_MIPP_ &w[0]);
MR_OUT
return TRUE;
}
zzn2_from_int(_MIPP_ 1,&w[0]);
zzn2_copy_i(&x[0],&w[1]);
for (i=2;i<m;i++)
{
if (zzn2_isunity(_MIPP_ &x[i-1]))
zzn2_copy_i(&w[i-1],&w[i]);
else
zzn2_mul_i(&w[i-1],&x[i-1],&w[i]);
}
t1.a=mr_mip->w8;
t1.b=mr_mip->w9;
t2.a=mr_mip->w10;
t2.b=mr_mip->w11;
zzn2_mul_i(&w[m-1],&x[m-1],&t1);
if (zzn2_iszero(&t1))
{
mr_berror(_MIPP_ MR_ERR_DIV_BY_ZERO);
MR_OUT
return FALSE;
}
zzn2_inv_i(_MIPP_ &t1);
zzn2_copy_i(&x[m-1],&t2);
zzn2_mul_i(&w[m-1],&t1,&w[m-1]);
for (i=m-2;;i--)
{
if (i==0)
{
zzn2_mul_i(&t2,&t1,&w[0]);
break;
}
zzn2_mul_i(&w[i],&t2,&w[i]);
zzn2_mul_i(&w[i],&t1,&w[i]);
if (!zzn2_isunity(_MIPP_ &x[i])) zzn2_mul_i(&t2,&x[i],&t2);
}
MR_OUT
return TRUE;
}
*/
/* TRUE iff a is the point at infinity. */
BOOL ecn2_iszero(ecn2 *a)
{
    return (a->marker==MR_EPOINT_INFINITY) ? TRUE : FALSE;
}
/* b := a.  The z coordinate is copied only for general (projective)
 * points; normalized/infinity points carry their state in the marker. */
void ecn2_copy(ecn2 *a,ecn2 *b)
{
    zzn2_copy_i(&(a->x),&(b->x));
    zzn2_copy_i(&(a->y),&(b->y));
#ifndef MR_AFFINE_ONLY
    if (a->marker==MR_EPOINT_GENERAL) zzn2_copy_i(&(a->z),&(b->z));
#endif
    b->marker=a->marker;
}
/* Set a to the point at infinity, clearing its coordinates. */
void ecn2_zero(ecn2 *a)
{
    zzn2_zero(&(a->x)); zzn2_zero(&(a->y));
#ifndef MR_AFFINE_ONLY
    if (a->marker==MR_EPOINT_GENERAL) zzn2_zero(&(a->z));
#endif
    a->marker=MR_EPOINT_INFINITY;
}
/* TRUE iff a and b are the same point.  Both are normalized first (a
 * side effect on the arguments) so affine coordinates can be compared
 * directly. */
BOOL ecn2_compare(_MIPD_ ecn2 *a,ecn2 *b)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return FALSE;
    MR_IN(193)
    ecn2_norm(_MIPP_ a);
    ecn2_norm(_MIPP_ b);
    MR_OUT
    if (zzn2_compare(&(a->x),&(b->x)) && zzn2_compare(&(a->y),&(b->y)) && a->marker==b->marker) return TRUE;
    return FALSE;
}
/* Normalize a general projective point to affine form: with Jacobian
 * coordinates, x := x/z^2 and y := y/z^3, then z := 1.  No-op for points
 * already normalized or at infinity. */
void ecn2_norm(_MIPD_ ecn2 *a)
{
    zzn2 t;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_AFFINE_ONLY
    if (mr_mip->ERNUM) return;
    if (a->marker!=MR_EPOINT_GENERAL) return;
    MR_IN(194)
    /* z = 1/z, then t = 1/z, z = 1/z^2, x *= 1/z^2, z = 1/z^3, y *= 1/z^3 */
    zzn2_inv_i(_MIPP_ &(a->z));
    t.a=mr_mip->w3;
    t.b=mr_mip->w4;
    zzn2_copy_i(&(a->z),&t);
    zzn2_sqr_i( &(a->z),&(a->z));
    zzn2_mul_i( &(a->x),&(a->z),&(a->x));
    zzn2_mul_i( &(a->z),&t,&(a->z));
    zzn2_mul_i( &(a->y),&(a->z),&(a->y));
    zzn2_from_int(_MIPP_ 1,&(a->z));
    a->marker=MR_EPOINT_NORMALIZED;
    MR_OUT
#endif
}
/* Copy the (x,y,z) coordinates of e out; z is reported as 1 for
 * non-general points. */
void ecn2_get(_MIPD_ ecn2 *e,zzn2 *x,zzn2 *y,zzn2 *z)
{
    zzn2_copy_i(&(e->x),x);
    zzn2_copy_i(&(e->y),y);
#ifndef MR_AFFINE_ONLY
    if (e->marker==MR_EPOINT_GENERAL) zzn2_copy_i(&(e->z),z);
    /* NOTE(review): mr_mip comes from the _MIPD_ parameter here; under
       MR_OS_THREADS builds there is no local get_mip() -- confirm the
       build configurations this is compiled for. */
    else zzn2_from_zzn(mr_mip->one,z);
#endif
}
/* Copy just the (x,y) coordinates of e out. */
void ecn2_getxy(ecn2 *e,zzn2 *x,zzn2 *y)
{
    zzn2_copy_i(&(e->x),x);
    zzn2_copy_i(&(e->y),y);
}
/* Copy just the x coordinate of e out. */
void ecn2_getx(ecn2 *e,zzn2 *x)
{
    zzn2_copy_i(&(e->x),x);
}
/* w := conjugate of x: (a+ib) -> (a-ib). */
inline void zzn2_conj_i(zzn2 *x,zzn2 *w)
{
    zzn2_copy_i(x,w);
    modneg(w->b->w);
}
/* Apply the endomorphism P -> (psi[0]*conj(x), psi[1]*conj(y)) after
 * normalizing P (presumably the twist/Frobenius "psi" map used for fast
 * scalar multiplication -- confirm against the caller's psi constants). */
void ecn2_psi(_MIPD_ zzn2 *psi,ecn2 *P)
{
    ecn2_norm(_MIPP_ P);
    zzn2_conj_i(&(P->x),&(P->x));
    zzn2_conj_i(&(P->y),&(P->y));
    zzn2_mul_i(&(P->x),&psi[0],&(P->x));
    zzn2_mul_i(&(P->y),&psi[1],&(P->y));
}
#ifndef MR_AFFINE_ONLY
/* Copy the z coordinate of e out; 1 for non-general points. */
void ecn2_getz(_MIPD_ ecn2 *e,zzn2 *z)
{
    if (e->marker==MR_EPOINT_GENERAL) zzn2_copy_i(&(e->z),z);
    else zzn2_from_zzn(mr_mip->one,z);
}
#endif
void ecn2_rhs(_MIPD_ zzn2 *x,zzn2 *rhs)
{ /* calculate RHS of elliptic curve equation */
  /* rhs := x^3 + A*x + B, with A and B mapped onto the quadratic twist
     (via zzn2_txx_i / zzn2_txd / negation) when mr_mip->TWIST is set. */
    BOOL twist;
    zzn2 A,B;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    twist=mr_mip->TWIST;
    MR_IN(202)
    A.a=mr_mip->w10;
    A.b=mr_mip->w11;
    B.a=mr_mip->w12;
    B.b=mr_mip->w13;
    /* Small curve constants are loaded from ints, large ones from bigs. */
    if (mr_abs(mr_mip->Asize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Asize,&A);
    else zzn2_from_zzn(mr_mip->A,&A);
    if (mr_abs(mr_mip->Bsize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Bsize,&B);
    else zzn2_from_zzn(mr_mip->B,&B);
    if (twist)
    {
        /* Special cases A==0 or B==0 use division by the twist constant
           and a negation instead of the generic transform. */
        if (mr_mip->Asize==0 || mr_mip->Bsize==0)
        {
            if (mr_mip->Asize==0)
            {
                zzn2_txd(_MIPP_ &B);
            }
            if (mr_mip->Bsize==0)
            {
                zzn2_mul_i( &A,x,&B);
                zzn2_txd(_MIPP_ &B);
            }
            zzn2_negate(_MIPP_ &B,&B);
        }
        else
        {
            zzn2_txx_i(&B);
            zzn2_txx_i(&B);
            zzn2_txx_i(&B);
            zzn2_mul_i( &A,x,&A);
            zzn2_txx_i(&A);
            zzn2_txx_i(&A);
            zzn2_add_i(&B,&A,&B);
        }
    }
    else
    {
        /* untwisted: B := A*x + B */
        zzn2_mul_i( &A,x,&A);
        zzn2_add_i(&B,&A,&B);
    }
    /* rhs := x^3 + (accumulated linear/constant terms) */
    zzn2_sqr_i( x,&A);
    zzn2_mul_i( &A,x,&A);
    zzn2_add_i(&B,&A,rhs);
    MR_OUT
}
/* Set e := (x,y) after verifying the point lies on the curve
 * (y^2 == rhs(x)).  Returns FALSE without modifying e if it does not. */
BOOL ecn2_set(_MIPD_ zzn2 *x,zzn2 *y,ecn2 *e)
{
    zzn2 lhs,rhs;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return FALSE;
    MR_IN(195)
    lhs.a=mr_mip->w10;
    lhs.b=mr_mip->w11;
    rhs.a=mr_mip->w12;
    rhs.b=mr_mip->w13;
    ecn2_rhs(_MIPP_ x,&rhs);
    zzn2_sqr_i( y,&lhs);
    if (!zzn2_compare(&lhs,&rhs))
    {
        MR_OUT
        return FALSE;
    }
    zzn2_copy_i(x,&(e->x));
    zzn2_copy_i(y,&(e->y));
    e->marker=MR_EPOINT_NORMALIZED;
    MR_OUT
    return TRUE;
}
#ifndef MR_NOSUPPORT_COMPRESSION
/* Point decompression: set e from x alone by computing y = sqrt(rhs(x)).
 * Returns FALSE if rhs(x) is a non-residue.  Note: which of the two square
 * roots is chosen is whatever zzn2_sqrt yields. */
BOOL ecn2_setx(_MIPD_ zzn2 *x,ecn2 *e)
{
    zzn2 rhs;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return FALSE;
    MR_IN(201)
    rhs.a=mr_mip->w12;
    rhs.b=mr_mip->w13;
    ecn2_rhs(_MIPP_ x,&rhs);
    if (!zzn2_iszero(&rhs))
    {
        if (!zzn2_sqrt(_MIPP_ &rhs,&rhs))
        {
            MR_OUT
            return FALSE;
        }
    }
    zzn2_copy_i(x,&(e->x));
    zzn2_copy_i(&rhs,&(e->y));
    e->marker=MR_EPOINT_NORMALIZED;
    MR_OUT
    return TRUE;
}
#endif
#ifndef MR_AFFINE_ONLY
/* Set e := (x,y,z) as a general projective point; no curve check. */
void ecn2_setxyz(zzn2 *x,zzn2 *y,zzn2 *z,ecn2 *e)
{
    zzn2_copy_i(x,&(e->x));
    zzn2_copy_i(y,&(e->y));
    zzn2_copy_i(z,&(e->z));
    e->marker=MR_EPOINT_GENERAL;
}
#endif
/* w := -u.  Copies u into w and negates the y coordinate, unless the point
 * is at infinity (which is its own negation). */
void ecn2_negate(_MIPD_ ecn2 *u,ecn2 *w)
{
    ecn2_copy(u,w);
    /* Bug fix: the original condition `!w->marker!=MR_EPOINT_INFINITY`
       applied `!` before `!=`, comparing 0/1 against the marker constant
       instead of testing for infinity. */
    if (w->marker!=MR_EPOINT_INFINITY)
        zzn2_negate(_MIPP_ &(w->y),&(w->y));
}
/*
BOOL ecn2_add2(_MIPD_ ecn2 *Q,ecn2 *P,zzn2 *lam,zzn2 *ex1)
{
BOOL Doubling;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
Doubling=ecn2_add3(_MIPP_ Q,P,lam,ex1,NULL);
return Doubling;
}
BOOL ecn2_add1(_MIPD_ ecn2 *Q,ecn2 *P,zzn2 *lam)
{
BOOL Doubling;
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
Doubling=ecn2_add3(_MIPP_ Q,P,lam,NULL,NULL);
return Doubling;
}
*/
/* P := P - Q, implemented as P += (-Q); Q is restored before returning.
 * Returns the Doubling flag propagated from ecn2_add. */
BOOL ecn2_sub(_MIPD_ ecn2 *Q,ecn2 *P)
{
    BOOL Doubling;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    ecn2_negate(_MIPP_ Q,Q);
    Doubling=ecn2_add(_MIPP_ Q,P);
    ecn2_negate(_MIPP_ Q,Q);
    return Doubling;
}
/*
static void zzn2_print(_MIPD_ char *label, zzn2 *x)
{
char s1[1024], s2[1024];
big a, b;
#ifdef MR_STATIC
char mem_big[MR_BIG_RESERVE(2)];
memset(mem_big, 0, MR_BIG_RESERVE(2));
a=mirvar_mem(_MIPP_ mem_big,0);
b=mirvar_mem(_MIPP_ mem_big,1);
#else
a = mirvar(_MIPP_ 0);
b = mirvar(_MIPP_ 0);
#endif
redc(_MIPP_ x->a, a); otstr(_MIPP_ a, s1);
redc(_MIPP_ x->b, b); otstr(_MIPP_ b, s2);
printf("%s: [%s,%s]\n", label, s1, s2);
#ifndef MR_STATIC
mr_free(a); mr_free(b);
#endif
}
static void nres_print(_MIPD_ char *label, big x)
{
char s[1024];
big a;
a = mirvar(_MIPP_ 0);
redc(_MIPP_ x, a);
otstr(_MIPP_ a, s);
printf("%s: %s\n", label, s);
mr_free(a);
}
*/
BOOL ecn2_add_sub(_MIPD_ ecn2 *P,ecn2 *Q,ecn2 *PP,ecn2 *PM)
{ /* PP=P+Q, PM=P-Q. Assumes P and Q are both normalized, and P!=Q */
  /* A single field inversion of (Px-Qx) is shared between the two affine
     chord additions, which is the whole point of this combined routine. */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    zzn2 t1,t2,lam;
    if (mr_mip->ERNUM) return FALSE;
    MR_IN(211)
    /* Bug fix: the second operand previously re-tested P->marker, so a
       non-normalized Q was never rejected. */
    if (P->marker==MR_EPOINT_GENERAL || Q->marker==MR_EPOINT_GENERAL)
    { /* Sorry, some restrictions.. */
        mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
        MR_OUT
        return FALSE;
    }
    if (zzn2_compare(&(P->x),&(Q->x)))
    { /* P=Q or P=-Q - shouldn't happen; fall back to the generic routines */
        ecn2_copy(P,PP);
        ecn2_add(_MIPP_ Q,PP);
        ecn2_copy(P,PM);
        ecn2_sub(_MIPP_ Q,PM);
        MR_OUT
        return TRUE;
    }
    t1.a = mr_mip->w8;
    t1.b = mr_mip->w9;
    t2.a = mr_mip->w10;
    t2.b = mr_mip->w11;
    lam.a = mr_mip->w12;
    lam.b = mr_mip->w13;
    /* t2 = 1/(Px - Qx) -- the shared inverse */
    zzn2_copy_i(&(P->x),&t2);
    zzn2_sub_i(&t2,&(Q->x),&t2);
    zzn2_inv_i(_MIPP_ &t2); /* only one inverse required */
    zzn2_add_i(&(P->x),&(Q->x),&(PP->x));
    zzn2_copy_i(&(PP->x),&(PM->x));
    /* PP = P+Q: lam = (Py-Qy)/(Px-Qx), x3 = lam^2-Px-Qx,
       y3 = lam*(Qx-x3)-Qy */
    zzn2_copy_i(&(P->y),&t1);
    zzn2_sub_i(&t1,&(Q->y),&t1);
    zzn2_copy_i(&t1,&lam);
    zzn2_mul_i( &lam,&t2,&lam);
    zzn2_copy_i(&lam,&t1);
    zzn2_sqr_i( &t1,&t1);
    zzn2_sub_i(&t1,&(PP->x),&(PP->x));
    zzn2_copy_i(&(Q->x),&(PP->y));
    zzn2_sub_i(&(PP->y),&(PP->x),&(PP->y));
    zzn2_mul_i( &(PP->y),&lam,&(PP->y));
    zzn2_sub_i(&(PP->y),&(Q->y),&(PP->y));
    /* PM = P-Q: same formulas with -Q, i.e. lam = (Py+Qy)/(Px-Qx) and
       y3 = lam*(Qx-x3)+Qy */
    zzn2_copy_i(&(P->y),&t1);
    zzn2_add_i(&t1,&(Q->y),&t1);
    zzn2_copy_i(&t1,&lam);
    zzn2_mul_i( &lam,&t2,&lam);
    zzn2_copy_i(&lam,&t1);
    zzn2_sqr_i( &t1,&t1);
    zzn2_sub_i(&t1,&(PM->x),&(PM->x));
    zzn2_copy_i(&(Q->x),&(PM->y));
    zzn2_sub_i(&(PM->y),&(PM->x),&(PM->y));
    zzn2_mul_i( &(PM->y),&lam,&(PM->y));
    zzn2_add_i(&(PM->y),&(Q->y),&(PM->y));
    PP->marker=MR_EPOINT_NORMALIZED;
    PM->marker=MR_EPOINT_NORMALIZED;
    MR_OUT
    return TRUE;
}
/* P+=Q on an elliptic curve over the quadratic extension field.
   Optimized for MIXED addition: when Q!=P, Q must be normalized
   (affine z=1) or the call fails with MR_ERR_BAD_PARAMETERS.
   Returns TRUE iff the operation turned out to be a point doubling
   (NOTE: the return value signals Doubling, not success/failure).
   Fix: removed the unused local `int iA;`. */
BOOL ecn2_add(_MIPD_ ecn2 *Q,ecn2 *P)
{ /* P+=Q */
    BOOL Doubling=FALSE;
    BOOL twist;
    zzn2 t1,t2,t3,lam;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    /* alias temporaries onto the mip workspace registers w8..w15 - no allocation */
    t1.a = mr_mip->w8;
    t1.b = mr_mip->w9;
    t2.a = mr_mip->w10;
    t2.b = mr_mip->w11;
    t3.a = mr_mip->w12;
    t3.b = mr_mip->w13;
    lam.a = mr_mip->w14;
    lam.b = mr_mip->w15;
    twist=mr_mip->TWIST;
    if (mr_mip->ERNUM) return FALSE;
    if (P->marker==MR_EPOINT_INFINITY)
    { /* P is the identity: result is Q */
        ecn2_copy(Q,P);
        return Doubling;
    }
    if (Q->marker==MR_EPOINT_INFINITY) return Doubling; /* Q is the identity */
    MR_IN(205)
    if (Q!=P && Q->marker==MR_EPOINT_GENERAL)
    { /* Sorry, this code is optimized for mixed addition only */
        mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
        MR_OUT
        return Doubling;
    }
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
    {
#endif
        if (!zzn2_compare(&(P->x),&(Q->x)))
        { /* affine addition: lam=(Py-Qy)/(Px-Qx), x'=lam^2-Px-Qx, y'=lam(Qx-x')-Qy */
            zzn2_copy_i(&(P->y),&t1);
            zzn2_sub_i(&t1,&(Q->y),&t1);
            zzn2_copy_i(&(P->x),&t2);
            zzn2_sub_i(&t2,&(Q->x),&t2);
            zzn2_copy_i(&t1,&lam);
            zzn2_inv_i(_MIPP_ &t2);
            zzn2_mul_i( &lam,&t2,&lam);
            zzn2_add_i(&(P->x),&(Q->x),&(P->x));
            zzn2_copy_i(&lam,&t1);
            zzn2_sqr_i( &t1,&t1);
            zzn2_sub_i(&t1,&(P->x),&(P->x));
            zzn2_copy_i(&(Q->x),&(P->y));
            zzn2_sub_i(&(P->y),&(P->x),&(P->y));
            zzn2_mul_i( &(P->y),&lam,&(P->y));
            zzn2_sub_i(&(P->y),&(Q->y),&(P->y));
        }
        else
        {
            if (!zzn2_compare(&(P->y),&(Q->y)) || zzn2_iszero(&(P->y)))
            { /* P=-Q: result is the point at infinity */
                ecn2_zero(P);
                zzn2_from_int(_MIPP_ 1,&lam);
                MR_OUT
                return Doubling;
            }
            /* affine doubling: lam=(3x^2+a)/(2y) */
            zzn2_copy_i(&(P->x),&t1);
            zzn2_copy_i(&(P->x),&t2);
            zzn2_copy_i(&(P->x),&lam);
            zzn2_sqr_i( &lam,&lam);
            zzn2_copy_i(&lam,&t3);
            zzn2_tim2_i(&t3);
            zzn2_add_i(&lam,&t3,&lam);           /* lam=3x^2 */
            if (mr_abs(mr_mip->Asize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Asize,&t3);
            else zzn2_from_zzn(mr_mip->A,&t3);
            if (twist)
            { /* curve parameter a lives on the twist - apply the twist map twice */
                zzn2_txx_i(&t3);
                zzn2_txx_i(&t3);
            }
            zzn2_add_i(&lam,&t3,&lam);
            zzn2_copy_i(&(P->y),&t3);
            zzn2_tim2_i(&t3);
            zzn2_inv_i(_MIPP_ &t3);
            zzn2_mul_i( &lam,&t3,&lam);
            zzn2_add_i(&t2,&(P->x),&t2);
            zzn2_copy_i(&lam,&(P->x));
            zzn2_sqr_i( &(P->x),&(P->x));
            zzn2_sub_i(&(P->x),&t2,&(P->x));     /* x'=lam^2-2x */
            zzn2_sub_i(&t1,&(P->x),&t1);
            zzn2_mul_i( &t1,&lam,&t1);
            zzn2_sub_i(&t1,&(P->y),&(P->y));     /* y'=lam(x-x')-y */
        }
#ifndef MR_AFFINE_ONLY
        zzn2_from_int(_MIPP_ 1,&(P->z));
#endif
        P->marker=MR_EPOINT_NORMALIZED;
        MR_OUT
        return Doubling;
#ifndef MR_AFFINE_ONLY
    }
    /* projective (Jacobian) coordinates from here on */
    if (Q==P) Doubling=TRUE;
    if (!Doubling)
    {
        if (P->marker!=MR_EPOINT_NORMALIZED)
        { /* scale affine Q up to P's projective scale: t1=Qx.z^2, t2=Qy.z^3 */
            zzn2_sqr_i(&(P->z),&t1);
            zzn2_mul_i(&t1,&(P->z),&t2);
            zzn2_mul_i(&t1,&(Q->x),&t1);
            zzn2_mul_i(&t2,&(Q->y),&t2);
        }
        else
        {
            zzn2_copy(&(Q->x),&t1);
            zzn2_copy(&(Q->y),&t2);
        }
        if (zzn2_compare(&t1,&(P->x))) /*?*/
        { /* same x coordinate: either P=-Q (infinity) or P=Q (doubling) */
            if (!zzn2_compare(&t2,&(P->y)) || zzn2_iszero(&(P->y)))
            {
                ecn2_zero(P);
                zzn2_from_int(_MIPP_ 1,&lam);
                MR_OUT
                return Doubling;
            }
            else Doubling=TRUE;
        }
    }
    if (!Doubling)
    { /* Addition */
        zzn2_sub_i(&t1,&(P->x),&t1);
        zzn2_sub_i(&t2,&(P->y),&t2);
        if (P->marker==MR_EPOINT_NORMALIZED) zzn2_copy_i(&t1,&(P->z));
        else zzn2_mul_i(&(P->z),&t1,&(P->z));
        zzn2_sqr_i(&t1,&t3);
        zzn2_mul_i(&t3,&t1,&lam);
        zzn2_mul_i(&t3,&(P->x),&t3);
        zzn2_copy_i(&t3,&t1);
        zzn2_tim2_i(&t1);
        zzn2_sqr_i(&t2,&(P->x));
        zzn2_dblsub_i(&t1,&lam,&(P->x));
        zzn2_sub_i(&t3,&(P->x),&t3);
        zzn2_mul_i(&t3,&t2,&t3);
        zzn2_mul_i(&lam,&(P->y),&lam);
        zzn2_sub_i(&t3,&lam,&(P->y));
    }
    else
    { /* doubling */
        if (P->marker==MR_EPOINT_NORMALIZED) zzn2_from_int(_MIPP_ 1,&t1);
        else zzn2_sqr_i(&(P->z),&t1);
        if (twist) zzn2_txx_i(&t1);
        zzn2_sub_i(&(P->x),&t1,&t2);
        zzn2_add_i(&t1,&(P->x),&t1);
        zzn2_mul_i(&t2,&t1,&t2);
        zzn2_tim3_i(&t2);                        /* t2=3(x-z^2)(x+z^2), a=-3 form */
        zzn2_tim2_i(&(P->y));
        if (P->marker==MR_EPOINT_NORMALIZED) zzn2_copy_i(&(P->y),&(P->z));
        else zzn2_mul_i(&(P->z),&(P->y),&(P->z));
        zzn2_sqr_i(&(P->y),&(P->y));
        zzn2_mul_i(&(P->y),&(P->x),&t3);
        zzn2_sqr_i(&(P->y),&(P->y));
        zzn2_div2_i(&(P->y));
        zzn2_sqr_i(&t2,&(P->x));
        zzn2_copy_i(&t3,&t1);
        zzn2_tim2_i(&t1);
        zzn2_sub_i(&(P->x),&t1,&(P->x));
        zzn2_sub_i(&t3,&(P->x),&t1);
        zzn2_mul_i(&t1,&t2,&t1);
        zzn2_sub_i(&t1,&(P->y),&(P->y));
    }
    P->marker=MR_EPOINT_GENERAL;
    MR_OUT
    return Doubling;
#endif
}
/* Number of precomputed odd multiples required by the DOS
   precomputation for a given NAF window size w.
   Returns 0 for unsupported window sizes. */
static int calc_n(int w)
{
    switch (w)
    {
    case 3:  return 3;
    case 4:  return 5;
    case 5:  return 11;
    case 6:  return 41;
    default: return 0;
    }
}
/* Dahmen, Okeya and Schepers "Affine Precomputation with Sole Inversion in Elliptic Curve Cryptography" */
/* Precomputes table into T. Assumes first P has been copied to P[0], then calculates 3P, 5P, 7P etc. into T */
#define MR_DOS_2 (14+4*MR_STR_SZ_2P)
static void ecn2_dos(_MIPD_ int win,ecn2 *PT)
{
BOOL twist;
int i,j,sz;
zzn2 A,B,C,D,E,T,W,d[MR_STR_SZ_2P],e[MR_STR_SZ_2P];
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
char *mem = memalloc(_MIPP_ MR_DOS_2);
#else
char mem[MR_BIG_RESERVE(MR_DOS_2)];
memset(mem, 0, MR_BIG_RESERVE(MR_DOS_2));
#endif
twist=mr_mip->TWIST;
j=0;
sz=calc_n(win);
A.a= mirvar_mem(_MIPP_ mem, j++);
A.b= mirvar_mem(_MIPP_ mem, j++);
B.a= mirvar_mem(_MIPP_ mem, j++);
B.b= mirvar_mem(_MIPP_ mem, j++);
C.a= mirvar_mem(_MIPP_ mem, j++);
C.b= mirvar_mem(_MIPP_ mem, j++);
D.a= mirvar_mem(_MIPP_ mem, j++);
D.b= mirvar_mem(_MIPP_ mem, j++);
E.a= mirvar_mem(_MIPP_ mem, j++);
E.b= mirvar_mem(_MIPP_ mem, j++);
T.a= mirvar_mem(_MIPP_ mem, j++);
T.b= mirvar_mem(_MIPP_ mem, j++);
W.a= mirvar_mem(_MIPP_ mem, j++);
W.b= mirvar_mem(_MIPP_ mem, j++);
for (i=0;i<sz;i++)
{
d[i].a= mirvar_mem(_MIPP_ mem, j++);
d[i].b= mirvar_mem(_MIPP_ mem, j++);
e[i].a= mirvar_mem(_MIPP_ mem, j++);
e[i].b= mirvar_mem(_MIPP_ mem, j++);
}
zzn2_add_i(&(PT[0].y),&(PT[0].y),&d[0]); /* 1. d_0=2.y */
zzn2_sqr_i(&d[0],&C); /* 2. C=d_0^2 */
zzn2_sqr_i(&(PT[0].x),&T);
zzn2_add_i(&T,&T,&A);
zzn2_add_i(&T,&A,&T);
if (mr_abs(mr_mip->Asize)<MR_TOOBIG) zzn2_from_int(_MIPP_ mr_mip->Asize,&A);
else zzn2_from_zzn(mr_mip->A,&A);
if (twist)
{
zzn2_txx_i(&A);
zzn2_txx_i(&A);
}
zzn2_add_i(&A,&T,&A); /* 3. A=3x^2+a */
zzn2_copy_i(&A,&W);
zzn2_add_i(&C,&C,&B);
zzn2_add_i(&B,&C,&B);
zzn2_mul_i(&B,&(PT[0].x),&B); /* 4. B=3C.x */
zzn2_sqr_i(&A,&d[1]);
zzn2_sub_i(&d[1],&B,&d[1]); /* 5. d_1=A^2-B */
zzn2_sqr_i(&d[1],&E); /* 6. E=d_1^2 */
zzn2_mul_i(&B,&E,&B); /* 7. B=E.B */
zzn2_sqr_i(&C,&C); /* 8. C=C^2 */
zzn2_mul_i(&E,&d[1],&D); /* 9. D=E.d_1 */
zzn2_mul_i(&A,&d[1],&A);
zzn2_add_i(&A,&C,&A);
zzn2_negate(_MIPP_ &A,&A); /* 10. A=-d_1*A-C */
zzn2_add_i(&D,&D,&T);
zzn2_sqr_i(&A,&d[2]);
zzn2_sub_i(&d[2],&T,&d[2]);
zzn2_sub_i(&d[2],&B,&d[2]); /* 11. d_2=A^2-2D-B */
if (sz>3)
{
zzn2_sqr_i(&d[2],&E); /* 12. E=d_2^2 */
zzn2_add_i(&T,&D,&T);
zzn2_add_i(&T,&B,&T);
zzn2_mul_i(&T,&E,&B); /* 13. B=E(B+3D) */
zzn2_add_i(&A,&A,&T);
zzn2_add_i(&C,&T,&C);
zzn2_mul_i(&C,&D,&C); /* 14. C=D(2A+C) */
zzn2_mul_i(&d[2],&E,&D); /* 15. D=E.d_2 */
zzn2_mul_i(&A,&d[2],&A);
zzn2_add_i(&A,&C,&A);
zzn2_negate(_MIPP_ &A,&A); /* 16. A=-d_2*A-C */
zzn2_sqr_i(&A,&d[3]);
zzn2_sub_i(&d[3],&D,&d[3]);
zzn2_sub_i(&d[3],&B,&d[3]); /* 17. d_3=A^2-D-B */
for (i=4;i<sz;i++)
{
zzn2_sqr_i(&d[i-1],&E); /* 19. E=d(i-1)^2 */
zzn2_mul_i(&B,&E,&B); /* 20. B=E.B */
zzn2_mul_i(&C,&D,&C); /* 21. C=D.C */
zzn2_mul_i(&E,&d[i-1],&D); /* 22. D=E.d(i-1) */
zzn2_mul_i(&A,&d[i-1],&A);
zzn2_add_i(&A,&C,&A);
zzn2_negate(_MIPP_ &A,&A); /* 23. A=-d(i-1)*A-C */
zzn2_sqr_i(&A,&d[i]);
zzn2_sub_i(&d[i],&D,&d[i]);
zzn2_sub_i(&d[i],&B,&d[i]); /* 24. d(i)=A^2-D-B */
}
}
zzn2_copy_i(&d[0],&e[0]);
for (i=1;i<sz;i++)
zzn2_mul_i(&e[i-1],&d[i],&e[i]);
zzn2_copy_i(&e[sz-1],&A);
zzn2_inv_i(_MIPP_ &A);
for (i=sz-1;i>0;i--)
{
zzn2_copy_i(&d[i],&B);
zzn2_mul_i(&e[i-1],&A,&d[i]);
zzn2_mul_i(&A,&B,&A);
}
zzn2_copy_i(&A,&d[0]);
for (i=1;i<sz;i++)
{
zzn2_sqr_i(&e[i-1],&T);
zzn2_mul_i(&d[i],&T,&d[i]); /** */
}
zzn2_mul_i(&W,&d[0],&W);
zzn2_sqr_i(&W,&A);
zzn2_sub_i(&A,&(PT[0].x),&A);
zzn2_sub_i(&A,&(PT[0].x),&A);
zzn2_sub_i(&(PT[0].x),&A,&B);
zzn2_mul_i(&B,&W,&B);
zzn2_sub_i(&B,&(PT[0].y),&B);
zzn2_sub_i(&B,&(PT[0].y),&T);
zzn2_mul_i(&T,&d[1],&T);
zzn2_sqr_i(&T,&(PT[1].x));
zzn2_sub_i(&(PT[1].x),&A,&(PT[1].x));
zzn2_sub_i(&(PT[1].x),&(PT[0].x),&(PT[1].x));
zzn2_sub_i(&A,&(PT[1].x),&(PT[1].y));
zzn2_mul_i(&(PT[1].y),&T,&(PT[1].y));
zzn2_sub_i(&(PT[1].y),&B,&(PT[1].y));
for (i=2;i<sz;i++)
{
zzn2_sub_i(&(PT[i-1].y),&B,&T);
zzn2_mul_i(&T,&d[i],&T);
zzn2_sqr_i(&T,&(PT[i].x));
zzn2_sub_i(&(PT[i].x),&A,&(PT[i].x));
zzn2_sub_i(&(PT[i].x),&(PT[i-1].x),&(PT[i].x));
zzn2_sub_i(&A,&(PT[i].x),&(PT[i].y));
zzn2_mul_i(&(PT[i].y),&T,&(PT[i].y));
zzn2_sub_i(&(PT[i].y),&B,&(PT[i].y));
}
for (i=0;i<sz;i++) PT[i].marker=MR_EPOINT_NORMALIZED;
#ifndef MR_STATIC
memkill(_MIPP_ mem, MR_DOS_2);
#else
memset(mem, 0, MR_BIG_RESERVE(MR_DOS_2));
#endif
}
#ifndef MR_DOUBLE_BIG
#define MR_MUL_RESERVE (1+4*MR_STR_SZ_2)
#else
#define MR_MUL_RESERVE (2+4*MR_STR_SZ_2)
#endif
/* P=k.P - scalar multiplication by the sliding NAF-window method,
   using an on-line DOS-precomputed table of odd multiples of P.
   Returns the number of point additions performed. */
int ecn2_mul(_MIPD_ big k,ecn2 *P)
{
    int i,j,nb,n,nbs,nzs,nadds;
    big h;
    ecn2 T[MR_STR_SZ_2];
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL_RESERVE));
#endif
    j=0;
#ifndef MR_DOUBLE_BIG
    h=mirvar_mem(_MIPP_ mem, j++);
#else
    h=mirvar_mem(_MIPP_ mem, j); j+=2;      /* h needs a double-length big */
#endif
    for (i=0;i<MR_STR_SZ_2;i++)
    { /* allocate the precomputation table (affine points, x and y only) */
        T[i].x.a= mirvar_mem(_MIPP_ mem, j++);
        T[i].x.b= mirvar_mem(_MIPP_ mem, j++);
        T[i].y.a= mirvar_mem(_MIPP_ mem, j++);
        T[i].y.b= mirvar_mem(_MIPP_ mem, j++);
    }
    MR_IN(207)
    ecn2_norm(_MIPP_ P);
    nadds=0;
    premult(_MIPP_ k,3,h);                  /* h=3k, as required for NAF recoding */
    ecn2_copy(P,&T[0]);
    ecn2_dos(_MIPP_ MR_WIN_SZ_2,T);         /* precompute P,3P,5P,... */
    nb=logb2(_MIPP_ h);
    for (i=nb-2;i>=1;)
    { /* scan the NAF from the top bit down */
        if (mr_mip->user!=NULL) (*mr_mip->user)();   /* user callback hook */
        n=mr_naf_window(_MIPP_ k,h,i,&nbs,&nzs,MR_WIN_SZ_2);
        for (j=0;j<nbs;j++) ecn2_add(_MIPP_ P,P);    /* nbs doublings */
        if (n>0) {nadds++; ecn2_add(_MIPP_ &T[n/2],P);}
        if (n<0) {nadds++; ecn2_sub(_MIPP_ &T[(-n)/2],P);}
        i-=nbs;
        if (nzs)
        { /* run of zero digits: doublings only */
            for (j=0;j<nzs;j++) ecn2_add(_MIPP_ P,P);
            i-=nzs;
        }
    }
    ecn2_norm(_MIPP_ P);
    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL_RESERVE));
#endif
    return nadds;
}
/* Double addition, using Joint Sparse Form */
/* R=aP+bQ */
#define MR_MUL2_JSF_RESERVE 20
/* Double point multiplication R=a.P+b.Q using the Joint Sparse Form
   of the multiplier pair (a,b). Returns the number of point additions
   performed. */
int ecn2_mul2_jsf(_MIPD_ big a,ecn2 *P,big b,ecn2 *Q,ecn2 *R)
{
    int e1,h1,e2,h2,bb,nadds;
    ecn2 P1,P2,PS,PD;
    big c,d,e,f;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL2_JSF_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL2_JSF_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_JSF_RESERVE));
#endif
    /* carve all temporaries out of one memory chunk */
    c = mirvar_mem(_MIPP_ mem, 0);
    d = mirvar_mem(_MIPP_ mem, 1);
    e = mirvar_mem(_MIPP_ mem, 2);
    f = mirvar_mem(_MIPP_ mem, 3);
    P1.x.a= mirvar_mem(_MIPP_ mem, 4);
    P1.x.b= mirvar_mem(_MIPP_ mem, 5);
    P1.y.a= mirvar_mem(_MIPP_ mem, 6);
    P1.y.b= mirvar_mem(_MIPP_ mem, 7);
    P2.x.a= mirvar_mem(_MIPP_ mem, 8);
    P2.x.b= mirvar_mem(_MIPP_ mem, 9);
    P2.y.a= mirvar_mem(_MIPP_ mem, 10);
    P2.y.b= mirvar_mem(_MIPP_ mem, 11);
    PS.x.a= mirvar_mem(_MIPP_ mem, 12);
    PS.x.b= mirvar_mem(_MIPP_ mem, 13);
    PS.y.a= mirvar_mem(_MIPP_ mem, 14);
    PS.y.b= mirvar_mem(_MIPP_ mem, 15);
    PD.x.a= mirvar_mem(_MIPP_ mem, 16);
    PD.x.b= mirvar_mem(_MIPP_ mem, 17);
    PD.y.a= mirvar_mem(_MIPP_ mem, 18);
    PD.y.b= mirvar_mem(_MIPP_ mem, 19);
    MR_IN(206)
    /* make both multipliers positive, negating the points to compensate */
    ecn2_norm(_MIPP_ Q);
    ecn2_copy(Q,&P2);
    copy(b,d);
    if (size(d)<0)
    {
        negify(d,d);
        ecn2_negate(_MIPP_ &P2,&P2);
    }
    ecn2_norm(_MIPP_ P);
    ecn2_copy(P,&P1);
    copy(a,c);
    if (size(c)<0)
    {
        negify(c,c);
        ecn2_negate(_MIPP_ &P1,&P1);
    }
    mr_jsf(_MIPP_ d,c,e,d,f,c); /* calculate joint sparse form */
    if (mr_compare(e,f)>0) bb=logb2(_MIPP_ e)-1;
    else bb=logb2(_MIPP_ f)-1;
    ecn2_add_sub(_MIPP_ &P1,&P2,&PS,&PD);    /* PS=P1+P2, PD=P1-P2 */
    ecn2_zero(R);
    nadds=0;
    while (bb>=0)
    { /* add/subtract method */
        if (mr_mip->user!=NULL) (*mr_mip->user)();
        ecn2_add(_MIPP_ R,R);                /* double R at every position */
        e1=h1=e2=h2=0;
        /* read the signed JSF digit pair at bit position bb */
        if (mr_testbit(_MIPP_ d,bb)) e2=1;
        if (mr_testbit(_MIPP_ e,bb)) h2=1;
        if (mr_testbit(_MIPP_ c,bb)) e1=1;
        if (mr_testbit(_MIPP_ f,bb)) h1=1;
        if (e1!=h1)
        { /* first digit is non-zero */
            if (e2==h2)
            { /* second digit is zero: add/sub P1 alone */
                if (h1==1) {ecn2_add(_MIPP_ &P1,R); nadds++;}
                else {ecn2_sub(_MIPP_ &P1,R); nadds++;}
            }
            else
            { /* both digits non-zero: use the precomputed sum/difference */
                if (h1==1)
                {
                    if (h2==1) {ecn2_add(_MIPP_ &PS,R); nadds++;}
                    else {ecn2_add(_MIPP_ &PD,R); nadds++;}
                }
                else
                {
                    if (h2==1) {ecn2_sub(_MIPP_ &PD,R); nadds++;}
                    else {ecn2_sub(_MIPP_ &PS,R); nadds++;}
                }
            }
        }
        else if (e2!=h2)
        { /* only the second digit is non-zero */
            if (h2==1) {ecn2_add(_MIPP_ &P2,R); nadds++;}
            else {ecn2_sub(_MIPP_ &P2,R); nadds++;}
        }
        bb-=1;
    }
    ecn2_norm(_MIPP_ R);
    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL2_JSF_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_JSF_RESERVE));
#endif
    return nadds;
}
/* General purpose multi-exponentiation engine, using inter-leaving algorithm. Calculate aP+bQ+cR+dS...
Inputs are divided into two groups of sizes wa<4 and wb<4. For the first group if the points are fixed the
first precomputed Table Ta[] may be taken from ROM. For the second group if the points are variable Tb[j] will
have to computed online. Each group has its own window size, wina (=5?) and winb (=4?) respectively. The values
a,b,c.. are provided in ma[] and mb[], and 3.a,3.b,3.c (as required by the NAF) are provided in ma3[] and
mb3[]. If only one group is required, set wb=0 and pass NULL pointers.
*/
/* Interleaving multi-exponentiation engine: R = sum of ma[i].Ta-points
   plus sum of mb[i].Tb-points (see the block comment above).
   wa/wb are group sizes (<4), wina/winb the NAF window sizes, ma3/mb3
   hold 3x the multipliers as required by NAF recoding.
   Returns the number of point additions performed. */
int ecn2_muln_engine(_MIPD_ int wa,int wina,int wb,int winb,big *ma,big *ma3,big *mb,big *mb3,ecn2 *Ta,ecn2 *Tb,ecn2 *R)
{ /* general purpose interleaving algorithm engine for multi-exp */
    /* per-multiplier state: tb*=digits left in current window, pb*=doublings
       left before the pending add, n*=current signed NAF digit, s*=sign */
    int i,j,tba[4],pba[4],na[4],sa[4],tbb[4],pbb[4],nb[4],sb[4],nbits,nbs,nzs;
    int sza,szb,nadds;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    sza=calc_n(wina);   /* table stride for group a */
    szb=calc_n(winb);   /* table stride for group b */
    ecn2_zero(R);
    nbits=0;
    /* record signs and find the longest recoded multiplier */
    for (i=0;i<wa;i++) {sa[i]=exsign(ma[i]); tba[i]=0; j=logb2(_MIPP_ ma3[i]); if (j>nbits) nbits=j; }
    for (i=0;i<wb;i++) {sb[i]=exsign(mb[i]); tbb[i]=0; j=logb2(_MIPP_ mb3[i]); if (j>nbits) nbits=j; }
    nadds=0;
    for (i=nbits-1;i>=1;i--)
    { /* one shared doubling per bit position, interleaved adds */
        if (mr_mip->user!=NULL) (*mr_mip->user)();
        if (R->marker!=MR_EPOINT_INFINITY) ecn2_add(_MIPP_ R,R);
        for (j=0;j<wa;j++)
        { /* deal with the first group */
            if (tba[j]==0)
            { /* current window exhausted - recode the next one */
                na[j]=mr_naf_window(_MIPP_ ma[j],ma3[j],i,&nbs,&nzs,wina);
                tba[j]=nbs+nzs;
                pba[j]=nbs;
            }
            tba[j]--; pba[j]--;
            if (pba[j]==0)
            { /* the pending digit's doublings are done - apply the add */
                if (sa[j]==PLUS)
                {
                    if (na[j]>0) {ecn2_add(_MIPP_ &Ta[j*sza+na[j]/2],R); nadds++;}
                    if (na[j]<0) {ecn2_sub(_MIPP_ &Ta[j*sza+(-na[j])/2],R); nadds++;}
                }
                else
                { /* negative multiplier: swap add and subtract */
                    if (na[j]>0) {ecn2_sub(_MIPP_ &Ta[j*sza+na[j]/2],R); nadds++;}
                    if (na[j]<0) {ecn2_add(_MIPP_ &Ta[j*sza+(-na[j])/2],R); nadds++;}
                }
            }
        }
        for (j=0;j<wb;j++)
        { /* deal with the second group */
            if (tbb[j]==0)
            {
                nb[j]=mr_naf_window(_MIPP_ mb[j],mb3[j],i,&nbs,&nzs,winb);
                tbb[j]=nbs+nzs;
                pbb[j]=nbs;
            }
            tbb[j]--; pbb[j]--;
            if (pbb[j]==0)
            {
                if (sb[j]==PLUS)
                {
                    if (nb[j]>0) {ecn2_add(_MIPP_ &Tb[j*szb+nb[j]/2],R); nadds++;}
                    if (nb[j]<0) {ecn2_sub(_MIPP_ &Tb[j*szb+(-nb[j])/2],R); nadds++;}
                }
                else
                {
                    if (nb[j]>0) {ecn2_sub(_MIPP_ &Tb[j*szb+nb[j]/2],R); nadds++;}
                    if (nb[j]<0) {ecn2_add(_MIPP_ &Tb[j*szb+(-nb[j])/2],R); nadds++;}
                }
            }
        }
    }
    ecn2_norm(_MIPP_ R);
    return nadds;
}
/* Routines to support Galbraith, Lin, Scott (GLS) method for ECC */
/* requires an endomorphism psi */
/* *********************** */
/* Precompute T - first half from i.P, second half from i.psi(P) */
/* GLS precomputation: fill T with sz=calc_n(win) odd multiples of P
   (via DOS precomputation), followed by the same sz points mapped
   through the endomorphism psi, i.e. i.psi(P).
   Fix: removed local `j`, which was initialized but never used. */
void ecn2_precomp_gls(_MIPD_ int win,ecn2 *P,zzn2 *psi,ecn2 *T)
{
    int i,sz;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    sz=calc_n(win);
    MR_IN(219)
    ecn2_norm(_MIPP_ P);
    ecn2_copy(P,&T[0]);
    ecn2_dos(_MIPP_ win,T); /* precompute table */
    for (i=sz;i<sz+sz;i++)
    { /* second half: apply psi to each precomputed point */
        ecn2_copy(&T[i-sz],&T[i]);
        ecn2_psi(_MIPP_ psi,&T[i]);
    }
    MR_OUT
}
/* Calculate a[0].P+a[1].psi(P) using interleaving method */
#define MR_MUL2_GLS_RESERVE (2+2*MR_STR_SZ_2*4)
/* R = a[0].P + a[1].psi(P) by the interleaving method (GLS).
   Returns the number of point additions performed. */
int ecn2_mul2_gls(_MIPD_ big *a,ecn2 *P,zzn2 *psi,ecn2 *R)
{
    int i,j,nadds;
    ecn2 T[2*MR_STR_SZ_2];  /* first half i.P, second half i.psi(P) */
    big a3[2];              /* 3*a[i], needed for NAF recoding */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL2_GLS_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL2_GLS_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_GLS_RESERVE));
#endif
    for (j=i=0;i<2;i++)
        a3[i]=mirvar_mem(_MIPP_ mem, j++);
    for (i=0;i<2*MR_STR_SZ_2;i++)
    { /* allocate the affine precomputation table */
        T[i].x.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].x.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].marker=MR_EPOINT_INFINITY;
    }
    MR_IN(220)
    ecn2_precomp_gls(_MIPP_ MR_WIN_SZ_2,P,psi,T);
    for (i=0;i<2;i++) premult(_MIPP_ a[i],3,a3[i]); /* calculate for NAF */
    /* only the "variable" group (wb=2) is used here */
    nadds=ecn2_muln_engine(_MIPP_ 0,0,2,MR_WIN_SZ_2,NULL,NULL,a,a3,NULL,T,R);
    ecn2_norm(_MIPP_ R);
    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL2_GLS_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_GLS_RESERVE));
#endif
    return nadds;
}
/* Calculates a[0]*P+a[1]*psi(P) + b[0]*Q+b[1]*psi(Q)
where P is fixed, and precomputations are already done off-line into FT
using ecn2_precomp_gls. Useful for signature verification */
#define MR_MUL4_GLS_V_RESERVE (4+2*MR_STR_SZ_2*4)
/* R = a[0].P + a[1].psi(P) + b[0].Q + b[1].psi(Q), where P is fixed and
   its precomputation FT was built off-line (ecn2_precomp_gls); the
   variable-point table VT for Q is computed here. Useful for signature
   verification. Returns the number of point additions performed. */
int ecn2_mul4_gls_v(_MIPD_ big *a,ecn2 *FT,big *b,ecn2 *Q,zzn2 *psi,ecn2 *R)
{
    int i,j,nadds;
    ecn2 VT[2*MR_STR_SZ_2]; /* on-line table for Q and psi(Q) */
    big a3[2],b3[2];        /* 3*a[i], 3*b[i] for NAF recoding */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL4_GLS_V_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL4_GLS_V_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL4_GLS_V_RESERVE));
#endif
    j=0;
    for (i=0;i<2;i++)
    {
        a3[i]=mirvar_mem(_MIPP_ mem, j++);
        b3[i]=mirvar_mem(_MIPP_ mem, j++);
    }
    for (i=0;i<2*MR_STR_SZ_2;i++)
    {
        VT[i].x.a=mirvar_mem(_MIPP_ mem, j++);
        VT[i].x.b=mirvar_mem(_MIPP_ mem, j++);
        VT[i].y.a=mirvar_mem(_MIPP_ mem, j++);
        VT[i].y.b=mirvar_mem(_MIPP_ mem, j++);
        VT[i].marker=MR_EPOINT_INFINITY;
    }
    MR_IN(217)
    ecn2_precomp_gls(_MIPP_ MR_WIN_SZ_2,Q,psi,VT); /* precompute for the variable points */
    for (i=0;i<2;i++)
    { /* needed for NAF */
        premult(_MIPP_ a[i],3,a3[i]);
        premult(_MIPP_ b[i],3,b3[i]);
    }
    /* fixed group uses the (possibly larger) precomputation window */
    nadds=ecn2_muln_engine(_MIPP_ 2,MR_WIN_SZ_2P,2,MR_WIN_SZ_2,a,a3,b,b3,FT,VT,R);
    ecn2_norm(_MIPP_ R);
    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL4_GLS_V_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL4_GLS_V_RESERVE));
#endif
    return nadds;
}
/* Calculate a.P+b.Q using interleaving method. P is fixed and FT is precomputed from it */
/* Precompute the table T of odd multiples of P (P,3P,5P,...) for
   window size win, using the single-inversion DOS method.
   Fix: removed local `sz=calc_n(win)`, whose result was never used. */
void ecn2_precomp(_MIPD_ int win,ecn2 *P,ecn2 *T)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    MR_IN(216)
    ecn2_norm(_MIPP_ P);
    ecn2_copy(P,&T[0]);
    ecn2_dos(_MIPP_ win,T);
    MR_OUT
}
#ifndef MR_DOUBLE_BIG
#define MR_MUL2_RESERVE (2+2*MR_STR_SZ_2*4)
#else
#define MR_MUL2_RESERVE (4+2*MR_STR_SZ_2*4)
#endif
/* R = a.P + b.Q by the interleaving method, where P is fixed with its
   precomputed table FT built off-line (ecn2_precomp), and the table T
   for the variable point Q is computed here.
   Returns the number of point additions performed. */
int ecn2_mul2(_MIPD_ big a,ecn2 *FT,big b,ecn2 *Q,ecn2 *R)
{
    int i,j,nadds;
    ecn2 T[2*MR_STR_SZ_2];  /* on-line table for Q */
    big a3,b3;              /* 3a and 3b for NAF recoding */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_STATIC
    char *mem = memalloc(_MIPP_ MR_MUL2_RESERVE);
#else
    char mem[MR_BIG_RESERVE(MR_MUL2_RESERVE)];
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_RESERVE));
#endif
    j=0;
#ifndef MR_DOUBLE_BIG
    a3=mirvar_mem(_MIPP_ mem, j++);
    b3=mirvar_mem(_MIPP_ mem, j++);
#else
    a3=mirvar_mem(_MIPP_ mem, j); j+=2;  /* double-length bigs */
    b3=mirvar_mem(_MIPP_ mem, j); j+=2;
#endif
    for (i=0;i<2*MR_STR_SZ_2;i++)
    {
        T[i].x.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].x.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.a=mirvar_mem(_MIPP_ mem, j++);
        T[i].y.b=mirvar_mem(_MIPP_ mem, j++);
        T[i].marker=MR_EPOINT_INFINITY;
    }
    MR_IN(218)
    ecn2_precomp(_MIPP_ MR_WIN_SZ_2,Q,T);
    premult(_MIPP_ a,3,a3);
    premult(_MIPP_ b,3,b3);
    nadds=ecn2_muln_engine(_MIPP_ 1,MR_WIN_SZ_2P,1,MR_WIN_SZ_2,&a,&a3,&b,&b3,FT,T,R);
    ecn2_norm(_MIPP_ R);
    MR_OUT
#ifndef MR_STATIC
    memkill(_MIPP_ mem, MR_MUL2_RESERVE);
#else
    memset(mem, 0, MR_BIG_RESERVE(MR_MUL2_RESERVE));
#endif
    return nadds;
}
#ifndef MR_STATIC
/* Build a "brick" (comb) precomputation table for fixed-base scalar
   multiplication and flatten it into B->table.
   (x,y) is the fixed base; a,b,n are the curve parameters/modulus;
   window is the comb window size in bits; nb is the maximum multiplier
   length in bits. Returns TRUE on success, FALSE on bad parameters or
   allocation failure. Uses Montgomery arithmetic internally. */
BOOL ecn2_brick_init(_MIPD_ ebrick *B,zzn2 *x,zzn2 *y,big a,big b,big n,int window,int nb)
{ /* Uses Montgomery arithmetic internally *
* (x,y) is the fixed base *
* a,b and n are parameters and modulus of the curve *
* window is the window size in bits and *
* nb is the maximum number of bits in the multiplier */
    int i,j,k,t,bp,len,bptr;
    ecn2 *table;
    ecn2 w;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (nb<2 || window<1 || window>nb || mr_mip->ERNUM) return FALSE;
    t=MR_ROUNDUP(nb,window);    /* bits per comb tooth */
    if (t<2) return FALSE;
    MR_IN(221)
#ifndef MR_ALWAYS_BINARY
    if (mr_mip->base != mr_mip->base2)
    { /* comb method needs a binary representation */
        mr_berror(_MIPP_ MR_ERR_NOT_SUPPORTED);
        MR_OUT
        return FALSE;
    }
#endif
    B->window=window;
    B->max=nb;
    table=mr_alloc(_MIPP_ (1<<window),sizeof(ecn2));
    if (table==NULL)
    {
        mr_berror(_MIPP_ MR_ERR_OUT_OF_MEMORY);
        MR_OUT
        return FALSE;
    }
    /* keep private copies of the curve parameters */
    B->a=mirvar(_MIPP_ 0);
    B->b=mirvar(_MIPP_ 0);
    B->n=mirvar(_MIPP_ 0);
    copy(a,B->a);
    copy(b,B->b);
    copy(n,B->n);
    ecurve_init(_MIPP_ a,b,n,MR_AFFINE);
    mr_mip->TWIST=TRUE;
    w.x.a=mirvar(_MIPP_ 0);
    w.x.b=mirvar(_MIPP_ 0);
    w.y.a=mirvar(_MIPP_ 0);
    w.y.b=mirvar(_MIPP_ 0);
    w.marker=MR_EPOINT_INFINITY;
    ecn2_set(_MIPP_ x,y,&w);
    /* table[i] = sum over set bits j of i of 2^(j*t).P */
    table[0].x.a=mirvar(_MIPP_ 0);
    table[0].x.b=mirvar(_MIPP_ 0);
    table[0].y.a=mirvar(_MIPP_ 0);
    table[0].y.b=mirvar(_MIPP_ 0);
    table[0].marker=MR_EPOINT_INFINITY;
    table[1].x.a=mirvar(_MIPP_ 0);
    table[1].x.b=mirvar(_MIPP_ 0);
    table[1].y.a=mirvar(_MIPP_ 0);
    table[1].y.b=mirvar(_MIPP_ 0);
    table[1].marker=MR_EPOINT_INFINITY;
    ecn2_copy(&w,&table[1]);
    for (j=0;j<t;j++)
        ecn2_add(_MIPP_ &w,&w);     /* w = 2^t.P */
    k=1;
    for (i=2;i<(1<<window);i++)
    {
        table[i].x.a=mirvar(_MIPP_ 0);
        table[i].x.b=mirvar(_MIPP_ 0);
        table[i].y.a=mirvar(_MIPP_ 0);
        table[i].y.b=mirvar(_MIPP_ 0);
        table[i].marker=MR_EPOINT_INFINITY;
        if (i==(1<<k))
        { /* new power of two: store current w, then advance w by 2^t */
            k++;
            ecn2_copy(&w,&table[i]);
            for (j=0;j<t;j++)
                ecn2_add(_MIPP_ &w,&w);
            continue;
        }
        bp=1;
        for (j=0;j<k;j++)
        { /* combine the power-of-two entries selected by the bits of i */
            if (i&bp)
                ecn2_add(_MIPP_ &table[1<<j],&table[i]);
            bp<<=1;
        }
    }
    mr_free(w.x.a);
    mr_free(w.x.b);
    mr_free(w.y.a);
    mr_free(w.y.b);
    /* create the table */
    len=n->len;
    bptr=0;
    /* NOTE(review): this mr_alloc result is not checked for NULL before use */
    B->table=mr_alloc(_MIPP_ 4*len*(1<<window),sizeof(mr_small));
    for (i=0;i<(1<<window);i++)
    { /* flatten each point into raw words and release the ecn2 copy */
        for (j=0;j<len;j++) B->table[bptr++]=table[i].x.a->w[j];
        for (j=0;j<len;j++) B->table[bptr++]=table[i].x.b->w[j];
        for (j=0;j<len;j++) B->table[bptr++]=table[i].y.a->w[j];
        for (j=0;j<len;j++) B->table[bptr++]=table[i].y.b->w[j];
        mr_free(table[i].x.a);
        mr_free(table[i].x.b);
        mr_free(table[i].y.a);
        mr_free(table[i].y.b);
    }
    mr_free(table);
    MR_OUT
    return TRUE;
}
/* Release all storage owned by a brick precomputation structure:
   the flattened point table and the private curve parameter copies. */
void ecn2_brick_end(ebrick *B)
{
    mr_free(B->table);
    mirkill(B->a);
    mirkill(B->b);
    mirkill(B->n);
}
#else
/* use precomputated table in ROM */
/* MR_STATIC build: initialise a brick structure from a precomputed
   table held in ROM. Curve parameters are referenced, not copied. */
void ecn2_brick_init(ebrick *B,const mr_small* rom,big a,big b,big n,int window,int nb)
{
    B->max=nb;          /* maximum multiplier length in bits */
    B->window=window;   /* 2^window stored values */
    B->n=n;             /* just pass pointers - caller retains ownership */
    B->b=b;
    B->a=a;
    B->table=rom;
}
#endif
/*
void ecn2_mul_brick(_MIPD_ ebrick *B,big e,zzn2 *x,zzn2 *y)
{
int i,j,t,len,maxsize,promptr;
ecn2 w,z;
#ifdef MR_STATIC
char mem[MR_BIG_RESERVE(10)];
#else
char *mem;
#endif
#ifdef MR_OS_THREADS
miracl *mr_mip=get_mip();
#endif
if (size(e)<0) mr_berror(_MIPP_ MR_ERR_NEG_POWER);
t=MR_ROUNDUP(B->max,B->window);
MR_IN(116)
#ifndef MR_ALWAYS_BINARY
if (mr_mip->base != mr_mip->base2)
{
mr_berror(_MIPP_ MR_ERR_NOT_SUPPORTED);
MR_OUT
return;
}
#endif
if (logb2(_MIPP_ e) > B->max)
{
mr_berror(_MIPP_ MR_ERR_EXP_TOO_BIG);
MR_OUT
return;
}
ecurve_init(_MIPP_ B->a,B->b,B->n,MR_BEST);
mr_mip->TWIST=TRUE;
#ifdef MR_STATIC
memset(mem,0,MR_BIG_RESERVE(10));
#else
mem=memalloc(_MIPP_ 10);
#endif
w.x.a=mirvar_mem(_MIPP_ mem, 0);
w.x.b=mirvar_mem(_MIPP_ mem, 1);
w.y.a=mirvar_mem(_MIPP_ mem, 2);
w.y.b=mirvar_mem(_MIPP_ mem, 3);
w.z.a=mirvar_mem(_MIPP_ mem, 4);
w.z.b=mirvar_mem(_MIPP_ mem, 5);
w.marker=MR_EPOINT_INFINITY;
z.x.a=mirvar_mem(_MIPP_ mem, 6);
z.x.b=mirvar_mem(_MIPP_ mem, 7);
z.y.a=mirvar_mem(_MIPP_ mem, 8);
z.y.b=mirvar_mem(_MIPP_ mem, 9);
z.marker=MR_EPOINT_INFINITY;
len=B->n->len;
maxsize=4*(1<<B->window)*len;
for (i=t-1;i>=0;i--)
{
j=recode(_MIPP_ e,t,B->window,i);
ecn2_add(_MIPP_ &w,&w);
if (j>0)
{
promptr=4*j*len;
init_big_from_rom(z.x.a,len,B->table,maxsize,&promptr);
init_big_from_rom(z.x.b,len,B->table,maxsize,&promptr);
init_big_from_rom(z.y.a,len,B->table,maxsize,&promptr);
init_big_from_rom(z.y.b,len,B->table,maxsize,&promptr);
z.marker=MR_EPOINT_NORMALIZED;
ecn2_add(_MIPP_ &z,&w);
}
}
ecn2_norm(_MIPP_ &w);
ecn2_getxy(&w,x,y);
#ifndef MR_STATIC
memkill(_MIPP_ mem,10);
#else
memset(mem,0,MR_BIG_RESERVE(10));
#endif
MR_OUT
}
*/
/* Fixed-base GLS multiplication using a precomputed brick table:
   (x,y) = (e[0] + e[1].psi).G where G is the brick's base point.
   e[0],e[1] may be negative (their signs are handled via se[]).
   psi is the GLS endomorphism constant. */
void ecn2_mul_brick_gls(_MIPD_ ebrick *B,big *e,zzn2 *psi,zzn2 *x,zzn2 *y)
{
    int i,j,k,t,len,maxsize,promptr,se[2];
    ecn2 w,z;   /* w = accumulator, z = point read from ROM */
#ifdef MR_STATIC
    char mem[MR_BIG_RESERVE(10)];
#else
    char *mem;
#endif
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    for (k=0;k<2;k++) se[k]=exsign(e[k]);   /* remember multiplier signs */
    t=MR_ROUNDUP(B->max,B->window);         /* comb teeth per multiplier */
    MR_IN(222)
#ifndef MR_ALWAYS_BINARY
    if (mr_mip->base != mr_mip->base2)
    { /* comb method needs a binary representation */
        mr_berror(_MIPP_ MR_ERR_NOT_SUPPORTED);
        MR_OUT
        return;
    }
#endif
    if (logb2(_MIPP_ e[0])>B->max || logb2(_MIPP_ e[1])>B->max)
    {
        mr_berror(_MIPP_ MR_ERR_EXP_TOO_BIG);
        MR_OUT
        return;
    }
    ecurve_init(_MIPP_ B->a,B->b,B->n,MR_BEST);
    mr_mip->TWIST=TRUE;
#ifdef MR_STATIC
    memset(mem,0,MR_BIG_RESERVE(10));
#else
    mem=memalloc(_MIPP_ 10);
#endif
    z.x.a=mirvar_mem(_MIPP_ mem, 0);
    z.x.b=mirvar_mem(_MIPP_ mem, 1);
    z.y.a=mirvar_mem(_MIPP_ mem, 2);
    z.y.b=mirvar_mem(_MIPP_ mem, 3);
    z.marker=MR_EPOINT_INFINITY;
    w.x.a=mirvar_mem(_MIPP_ mem, 4);
    w.x.b=mirvar_mem(_MIPP_ mem, 5);
    w.y.a=mirvar_mem(_MIPP_ mem, 6);
    w.y.b=mirvar_mem(_MIPP_ mem, 7);
#ifndef MR_AFFINE_ONLY
    w.z.a=mirvar_mem(_MIPP_ mem, 8);
    w.z.b=mirvar_mem(_MIPP_ mem, 9);
#endif
    w.marker=MR_EPOINT_INFINITY;
    len=B->n->len;
    maxsize=4*(1<<B->window)*len;   /* total words in the ROM table */
    for (i=t-1;i>=0;i--)
    { /* one doubling per comb row, then up to two table additions */
        ecn2_add(_MIPP_ &w,&w);
        for (k=0;k<2;k++)
        {
            j=recode(_MIPP_ e[k],t,B->window,i);    /* comb column index */
            if (j>0)
            {
                promptr=4*j*len;
                /* reconstruct the affine table point from raw ROM words */
                init_big_from_rom(z.x.a,len,B->table,maxsize,&promptr);
                init_big_from_rom(z.x.b,len,B->table,maxsize,&promptr);
                init_big_from_rom(z.y.a,len,B->table,maxsize,&promptr);
                init_big_from_rom(z.y.b,len,B->table,maxsize,&promptr);
                z.marker=MR_EPOINT_NORMALIZED;
                if (k==1) ecn2_psi(_MIPP_ psi,&z);  /* apply endomorphism for e[1] */
                if (se[k]==PLUS) ecn2_add(_MIPP_ &z,&w);
                else ecn2_sub(_MIPP_ &z,&w);
            }
        }
    }
    ecn2_norm(_MIPP_ &w);
    ecn2_getxy(&w,x,y);
#ifndef MR_STATIC
    memkill(_MIPP_ mem,10);
#else
    memset(mem,0,MR_BIG_RESERVE(10));
#endif
    MR_OUT
}
|
lslim_learn.c | /**************************************************************/
/*! \File lslim_learn.c
\brief This is the file containing the fundamental functions
to learn the model, used by LSLIM.
\author Evangelia Christakopoulou
\version 1.0
\date 2016
*/
/**************************************************************/
#include<slim.h>
/*****************************************************************************/
/*! \brief This function creates a new training matrix A'' and then
calls the main function local_learn, for computing the model.
Original training matrix A is of size n * m (rows * cols).
An intermediate training matrix A' is created which has:
nrows = rows of the original training matrix A
ncols = (number_of_clusters) * cols of the original matrix A
Every user has then exactly the nnzs of A
which are copied to the cluster in which he belongs.
For all the other clusters he has 0 entries.
In order to be able to regularize properly, we add underneath A'
a diagonal matrix which is of size: ncols * ncols.
This diagonal matrix contains the regularization params across the
diagonal (the local regularization ll).
The final matrix is A''.
Example: for 3 clusters:
m m m
n [ ] [ ] [ b ] n
m [ ll ] * [ w ] = [ 0 ] m
m [ ll ] [ ] [ 0 ] m
m [ ll ] [ 0 ] m
w is of size
A'' 3m * 1.
\param[in] ctrl The ctrl structure.
\param[in] train The training data A.
\param[in] participation The assignment of users to clusters.
\param[in] prev_model The previous model (used to expedite the learning)
\return model The model learnt.
*/
/*****************************************************************************/
/*! Build the augmented training matrix A'' - the cluster-expanded copy
    of A stacked on top of a diagonal regularization block - then learn
    the LSLIM model from it (see the block comment above for the layout).
    Returns the learnt model; the augmented matrix is freed internally. */
gk_csr_t *lslim_learn(ctrl_t * ctrl, gk_csr_t * train, int *participation,
                      gk_csr_t * prev_model)
{
  gk_csr_t *aug, *model = NULL;
  int u, p, nnz, shift, train_nnz;

  /* total non-zeros of the original training matrix A */
  train_nnz = train->rowptr[train->nrows] - train->rowptr[0];

  aug = gk_csr_Create();
  aug->ncols = (ctrl->num_clusters) * train->ncols;
  aug->nrows = train->nrows + aug->ncols;
  aug->rowptr = gk_zmalloc(aug->nrows + 1, "gk_csr_Dep: local_rowptr");
  aug->rowind =
      gk_imalloc(train_nnz + aug->ncols, "gk_csr_Dep: local_rowind");
  aug->rowval =
      gk_fmalloc(train_nnz + aug->ncols, "gk_csr_Dep: local_rowval");

  aug->rowptr[0] = 0;
  nnz = 0;
  for (u = 0; u < train->nrows; u++) {
    /* Copying the nnzs of the user to the cluster to which he belongs */
    shift = (participation[u]) * train->ncols;
    for (p = train->rowptr[u]; p < train->rowptr[u + 1]; p++) {
      aug->rowind[nnz] = train->rowind[p] + shift;
      aug->rowval[nnz] = train->rowval[p];
      nnz++;
    }
    aug->rowptr[u + 1] = nnz;
  }

  /* Adding the diagonal matrix underneath A' (local regularization) */
  for (u = train->nrows; u < aug->nrows; u++) {
    aug->rowptr[u] = nnz;
    aug->rowind[nnz] = u - train->nrows;
    aug->rowval[nnz] = ctrl->local_beta;
    nnz++;
  }
  aug->rowptr[aug->nrows] = nnz;

  gk_csr_CreateIndex(aug, GK_CSR_COL);
  gk_csr_CreateIndex(train, GK_CSR_COL);

  /* Learning the model */
  model = lslim_local_learn(ctrl, train, aug, prev_model);
  gk_csr_Free(&aug);
  return model;
}
/**************************************************************/
/*! \brief Learning
\details This routine contains the learning algorithm used by LSLIM.
\param[in] ctrl A ctrl structure which contains all the parameters
\param[in] orig_train The original training data (A)
\param[in] train The training data (A'')
\param[in] prev_model The previous model- used to speed up
learning. It is optional argument.
\return model The model returned.
*/
/**************************************************************/
gk_csr_t *lslim_local_learn(ctrl_t * ctrl, gk_csr_t * orig_train,
gk_csr_t * train, gk_csr_t * prev_model)
{
int i, nr, nc, ni, pos, j, ii;
int global_nnz;
int basestart, baseend, datasize, step, starti, endi;
gk_csr_t *mat = NULL;
double *bl, *bu, *c;
int *iinds, *jinds;
float *vals;
int *nnzs = NULL;
int *rinds1 = NULL;
int *rinds = NULL;
int *rjinds = NULL;
float *rvals = NULL;
int rank = 0;
int max_nnzs = 0;
double tmr;
int *rrowcnt = NULL;
/* set up timers */
gk_clearwctimer(tmr);
gk_startwctimer(tmr);
/* constants used across all problems */
nr = train->nrows;
nc = train->ncols;
ni = train->ncols / (ctrl->num_clusters);
/* mallocing */
bl = gk_dsmalloc(nc, ctrl->bl, "malloc bl"); /*lower bound */
bu = gk_dsmalloc(nc, ctrl->bu, "malloc bu"); /*upper bound */
c = gk_dmalloc(nc, "malloc c"); /*linear vector */
gk_dset(nc, ctrl->local_lambda, c);
/*starting and ending columns */
basestart = (ctrl->starti >= 0) ? ctrl->starti : 0;
baseend = (ctrl->endi >= 0) ? ctrl->endi : ni;
datasize = baseend - basestart;
step = (datasize / ctrl->num_procs) +
(ctrl->id < (datasize % ctrl->num_procs) ? 1 : 0);
starti = ((datasize / ctrl->num_procs) * ctrl->id) +
gk_min(ctrl->id, datasize % ctrl->num_procs);
endi = starti + step;
if ((endi < datasize) && (ctrl->id == ctrl->num_procs - 1)) {
endi = datasize;
step = datasize - starti;
}
pos = 0;
iinds = gk_ismalloc(step * nc, 0, "malloc iinds");
jinds = gk_ismalloc(step * nc, 0, "malloc jinds");
vals = gk_fsmalloc(step * nc, 0, "malloc vals");
/* go through all columns */
#pragma omp parallel num_threads(ctrl->num_threads)
{
int mypos;
double *w, *b;
wspace_t *myws;
BCLS *ls;
myws = (wspace_t *) gk_malloc(sizeof(wspace_t), "myws");
myws->mat = train;
myws->ncols = ni;
ls = bcls_create_prob(nr, nc);
w = gk_dsmalloc(nc, 0, "malloc w");
b = gk_dsmalloc(nr, 0, "malloc b");
#pragma omp for private(i, j) schedule(dynamic)
for (i = starti; i < endi; i++) {
// this column is totally empty
if (train->colptr[i + 1] - train->colptr[i] == 0) {
continue;
}
/**********************************************************/
/* BCLS learning */
/**********************************************************/
/* get the i-th column from A */
for (j = orig_train->colptr[i]; j < orig_train->colptr[i + 1]; j++) {
ii = orig_train->colind[j];
b[ii] = 1;
}
myws->max_bcls_niters = gk_min(ctrl->max_bcls_niters,
50 * (train->colptr[i + 1] -
train->colptr[i]));
gk_dset(nc, 0, w);
// disable
myws->acol = i;
if (prev_model != NULL) {
get_row(prev_model, i, w);
}
bcsol(ctrl, b, w, myws, bl, bu, 0, c, ls);
for (j = orig_train->colptr[i]; j < orig_train->colptr[i + 1] - 1; j++) {
ii = orig_train->colind[j];
b[ii] = 0;
}
/**********************************************************/
/* dump the data */
/**********************************************************/
/* compute the triplets */
#pragma omp critical
{
for (j = 0; j < nc; j++) {
if (w[j] > EPSILON) {
mypos = pos++;
iinds[mypos] = i;
jinds[mypos] = j;
vals[mypos] = w[j];
}
}
}
} // end of starti - endi
bcls_free_prob(ls);
gk_free((void **) &myws, (void **) &b, (void **) &w, LTERM);
}
gk_stopwctimer(tmr);
/* if(ctrl->id == 0){
printf("time passed is %f\n", gk_getwctimer(tmr));
}*/
/**********************************************************/
/* Combine all the mat of the different processes
to the total_mat with MPI */
/**********************************************************/
if (ctrl->id == 0) {
nnzs = gk_imalloc(ctrl->num_procs, "malloc nnzs");
gk_iset(ctrl->num_procs, 0, nnzs);
}
MPI_Gather(&pos, 1, MPI_INT, nnzs, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (ctrl->id == 0) {
global_nnz = 0;
for (i = 0; i < ctrl->num_procs; i++) {
global_nnz += nnzs[i];
}
}
MPI_Bcast(&global_nnz, 1, MPI_INT, 0, MPI_COMM_WORLD);
/* Finding the max nnzs between all nodes. This covers the case
when a node which is not 0 has a bigger iinds/jinds/vals matrix
than the one in node 0 */
if (ctrl->id == 0) {
max_nnzs = nnzs[0];
for (rank = 1; rank < ctrl->num_procs; rank++)
if (nnzs[rank] > max_nnzs)
max_nnzs = nnzs[rank];
}
/* Each node creates its own row count. This gets sent to node 0, in order
to create the total rowptr. */
if (global_nnz / ctrl->num_procs >= ni) {
rrowcnt = gk_ismalloc(ni, 0, "rrowcnt");
for (i = 0; i < pos; i++) {
rrowcnt[iinds[i]]++;
}
}
/* Every node sends its own iinds, jinds and vals. */
if (ctrl->id != 0) {
if (global_nnz / ctrl->num_procs < ni) {
MPI_Send(iinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
else {
MPI_Send(rrowcnt, ni, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
MPI_Send(iinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(jinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(vals, pos, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
}
if (ctrl->id == 0) {
if (global_nnz / ctrl->num_procs < ni) {
rinds1 = gk_icopy(nnzs[0], iinds, gk_imalloc(max_nnzs, "rinds1"));
}
rinds = gk_icopy(nnzs[0], iinds, gk_imalloc(max_nnzs, "rinds"));
rjinds = gk_icopy(nnzs[0], jinds, gk_imalloc(max_nnzs, "rjinds"));
rvals = gk_fcopy(nnzs[0], vals, gk_fmalloc(max_nnzs, "rvals"));
}
gk_free((void **) &iinds, &jinds, &vals, &bl, &bu, &c, LTERM);
/* Allocate and populate matrix */
mat = gk_csr_Create();
mat->nrows = ni;
mat->ncols = nc;
mat->rowptr = gk_zsmalloc(ni + 1, 0, "rowptr");
mat->rowind = gk_imalloc(global_nnz, "rowind");
mat->rowval = gk_fmalloc(global_nnz, "rowval");
if (ctrl->id == 0) {
if (global_nnz / ctrl->num_procs < ni) {
for (rank = 0; rank < ctrl->num_procs; rank++) {
if (rank != 0) {
MPI_Recv(rinds1, nnzs[rank], MPI_INT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for (i = 0; i < nnzs[rank]; i++) {
mat->rowptr[rinds1[i]]++;
}
}
}
else {
for (rank = 0; rank < ctrl->num_procs; rank++) {
if (rank != 0) {
MPI_Recv(rrowcnt, ni, MPI_INT, rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
for (i = 0; i < ni; i++) {
mat->rowptr[i] += rrowcnt[i];
}
}
}
MAKECSR(i, mat->nrows, mat->rowptr);
for (rank = 0; rank < ctrl->num_procs; rank++) {
if (rank != 0) {
MPI_Recv(rinds, nnzs[rank], MPI_INT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(rjinds, nnzs[rank], MPI_INT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(rvals, nnzs[rank], MPI_FLOAT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for (i = 0; i < nnzs[rank]; i++) {
mat->rowind[mat->rowptr[rinds[i]]] = rjinds[i];
mat->rowval[mat->rowptr[rinds[i]]] = rvals[i];
mat->rowptr[rinds[i]]++;
}
}
SHIFTCSR(i, mat->nrows, mat->rowptr);
gk_free((void **) &rinds, &rjinds, &rvals,
&nnzs, LTERM);
if (global_nnz / ctrl->num_procs < ni) {
gk_free((void **) &rinds1, LTERM);
}
}
if (rrowcnt != NULL) {
gk_free((void **) &rrowcnt, LTERM);
}
/* Broadcast the matrix to the other nodes */
MPI_Bcast(mat->rowptr, ni + 1, MPI_LONG, 0, MPI_COMM_WORLD);
MPI_Bcast(mat->rowind, global_nnz, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(mat->rowval, global_nnz, MPI_FLOAT, 0, MPI_COMM_WORLD);
return mat;
}
|
sparselu.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"
/***********************************************************************
* checkmat:
**********************************************************************/
int checkmat (float *M, float *N)
{
   /* Compare two bots_arg_size_1 x bots_arg_size_1 blocks element by
    * element.  Returns FALSE at the first entry whose relative error
    * exceeds EPSILON, TRUE when every entry matches. */
   int row, col;
   for (row = 0; row < bots_arg_size_1; row++)
   {
      for (col = 0; col < bots_arg_size_1; col++)
      {
         float diff = M[row*bots_arg_size_1+col] - N[row*bots_arg_size_1+col];
         if (diff < 0.0) diff = -diff;
         float rel = diff / M[row*bots_arg_size_1+col];
         if (rel > EPSILON)
         {
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
               row, col, M[row*bots_arg_size_1+col], row, col, N[row*bots_arg_size_1+col], rel);
            return FALSE;
         }
      }
   }
   return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
void genmat (float *M[])
{
   /* Populate the bots_arg_size x bots_arg_size table of block pointers:
    * decide which blocks are structurally zero (left as NULL), allocate
    * the rest and fill them with pseudo-random values from a fixed linear
    * congruential generator so runs are reproducible.
    * Exits with code 101 on allocation failure. */
   int null_entry, init_val, i, j, ii, jj;
   float *p;
   int a=0,b=0;      /* a = allocated blocks, b = NULL blocks (stats only) */
   init_val = 1325;  /* fixed seed -> deterministic matrix across runs */
   /* generating the structure */
   for (ii=0; ii < bots_arg_size; ii++)
   {
      for (jj=0; jj < bots_arg_size; jj++)
      {
         /* computing null entries: the diagonal and its immediate
          * neighbours are always kept; other blocks are dropped by the
          * parity / modulo-3 rules below */
         null_entry=FALSE;
         if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
         if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
         if (ii%2==1) null_entry = TRUE;
         if (jj%2==1) null_entry = TRUE;
         if (ii==jj) null_entry = FALSE;
         if (ii==jj-1) null_entry = FALSE;
         if (ii-1 == jj) null_entry = FALSE;
         /* allocating matrix */
         if (null_entry == FALSE){
            a++;
            M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
            if ((M[ii*bots_arg_size+jj] == NULL))
            {
               bots_message("Error: Out of memory\n");
               exit(101);
            }
            /* initializing matrix: LCG step X = 3125*X mod 65536,
             * scaled to roughly [-2, 2) */
            p = M[ii*bots_arg_size+jj];
            for (i = 0; i < bots_arg_size_1; i++)
            {
               for (j = 0; j < bots_arg_size_1; j++)
               {
                  init_val = (3125 * init_val) % 65536;
                  (*p) = (float)((init_val - 32768.0) / 16384.0);
                  p++;
               }
            }
         }
         else
         {
            b++;
            M[ii*bots_arg_size+jj] = NULL;
         }
      }
   }
   bots_debug("allo = %d, no = %d, total = %d, factor = %f\n",a,b,a+b,(float)((float)a/(float)(a+b)));
}
/***********************************************************************
* print_structure:
**********************************************************************/
void print_structure(char *name, float *M[])
{
   /* Print an ASCII map of the block structure: 'x' marks an allocated
    * block, a blank marks a structurally-zero (NULL) block. */
   int row, col;
   bots_message("Structure for matrix %s @ 0x%p\n",name, M);
   for (row = 0; row < bots_arg_size; row++) {
      for (col = 0; col < bots_arg_size; col++) {
         if (M[row*bots_arg_size+col] != NULL)
            bots_message("x");
         else
            bots_message(" ");
      }
      bots_message("\n");
   }
   bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
float * allocate_clean_block()
{
  /* Allocate one bots_arg_size_1 x bots_arg_size_1 block initialized to
   * zero.  Exits with code 101 on allocation failure, matching genmat().
   *
   * calloc zero-fills the buffer; an all-bits-zero pattern is 0.0f for
   * IEEE-754 floats, so this matches the original element-by-element
   * initialization loop without the extra pass over the memory. */
  float *p;
  p = (float *) calloc((size_t)bots_arg_size_1 * bots_arg_size_1, sizeof(float));
  if (p == NULL)
  {
     bots_message("Error: Out of memory\n");
     exit (101);
  }
  return (p);
}
/***********************************************************************
* lu0:
**********************************************************************/
void lu0(float *diag)
{
   /* Unblocked LU factorization (no pivoting) of a single
    * bots_arg_size_1 x bots_arg_size_1 block, performed in place:
    * L multipliers overwrite the strict lower triangle, U the rest. */
   int pivot, row, col;
   for (pivot = 0; pivot < bots_arg_size_1; pivot++)
   {
      for (row = pivot + 1; row < bots_arg_size_1; row++)
      {
         float mult = diag[row*bots_arg_size_1+pivot] / diag[pivot*bots_arg_size_1+pivot];
         diag[row*bots_arg_size_1+pivot] = mult;
         for (col = pivot + 1; col < bots_arg_size_1; col++)
            diag[row*bots_arg_size_1+col] -= mult * diag[pivot*bots_arg_size_1+col];
      }
   }
}
/***********************************************************************
* bdiv:
**********************************************************************/
void bdiv(float *diag, float *row)
{
   /* Below-diagonal panel update: divide each entry of 'row' by the
    * corresponding diagonal pivot of 'diag' and eliminate to the right,
    * in place, one matrix row at a time. */
   int r, c, k;
   for (r = 0; r < bots_arg_size_1; r++)
   {
      for (k = 0; k < bots_arg_size_1; k++)
      {
         float scaled = row[r*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         row[r*bots_arg_size_1+k] = scaled;
         for (c = k + 1; c < bots_arg_size_1; c++)
            row[r*bots_arg_size_1+c] -= scaled * diag[k*bots_arg_size_1+c];
      }
   }
}
/***********************************************************************
* bmod:
**********************************************************************/
void bmod(float *row, float *col, float *inner)
{
   /* Schur-complement update of one trailing block:
    * inner -= row * col (dense block matrix multiply-subtract). */
   int r, c, k;
   for (r = 0; r < bots_arg_size_1; r++)
      for (c = 0; c < bots_arg_size_1; c++)
      {
         float acc = inner[r*bots_arg_size_1+c];
         for (k = 0; k < bots_arg_size_1; k++)
            acc -= row[r*bots_arg_size_1+k] * col[k*bots_arg_size_1+c];
         inner[r*bots_arg_size_1+c] = acc;
      }
}
/***********************************************************************
* fwd:
**********************************************************************/
void fwd(float *diag, float *col)
{
   /* Right-of-diagonal panel update: forward-eliminate each column of
    * 'col' using the multipliers stored in the lower triangle of 'diag',
    * in place.  col[k][c] is not touched by the inner loop (r > k), so
    * it can be read once per (c, k) pair. */
   int r, c, k;
   for (c = 0; c < bots_arg_size_1; c++)
      for (k = 0; k < bots_arg_size_1; k++)
      {
         float pivot_val = col[k*bots_arg_size_1+c];
         for (r = k + 1; r < bots_arg_size_1; r++)
            col[r*bots_arg_size_1+c] -= diag[r*bots_arg_size_1+k] * pivot_val;
      }
}
void sparselu_init (float ***pBENCH, char *pass)
{
*pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
genmat(*pBENCH);
print_structure(pass, *pBENCH);
}
void sparselu_seq_call(float **BENCH)
{
   /* Sequential blocked sparse LU factorization (right-looking).
    * For each diagonal block kk: factor it (lu0), update the blocks to
    * its right (fwd) and below it (bdiv), then apply the Schur-complement
    * update (bmod) to the trailing submatrix.  NULL entries denote
    * structurally-zero blocks; fill-in blocks are allocated on demand. */
   int ii, jj, kk;
   for (kk=0; kk<bots_arg_size; kk++)
   {
      /* Factor the diagonal block in place. */
      lu0(BENCH[kk*bots_arg_size+kk]);
      /* Row panel: every non-NULL block right of the diagonal. */
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
         {
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
         }
      /* Column panel: every non-NULL block below the diagonal. */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
         {
            bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
         }
      /* Trailing update: inner(ii,jj) -= col(ii,kk) * row(kk,jj);
       * a previously-zero target block becomes fill-in. */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
                  if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
               }
   }
}
void sparselu_par_call(float **BENCH)
{
   /* Task-parallel blocked sparse LU factorization (BOTS "sparselu").
    * Same algorithm as sparselu_seq_call; each panel / trailing-update
    * operation on a non-NULL block is spawned as an untied OpenMP task
    * from inside a worksharing loop. */
   int ii, jj, kk;
   bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
           bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel private(kk)
   {
      for (kk=0; kk<bots_arg_size; kk++)
      {
         /* One thread factors the diagonal block; the implicit barrier at
          * the end of 'single' orders it before the panel updates. */
#pragma omp single
         lu0(BENCH[kk*bots_arg_size+kk]);
         /* Row panel (fwd): nowait lets threads move on to the column
          * panel loop, since fwd and bdiv touch disjoint blocks. */
#pragma omp for nowait
         for (jj=kk+1; jj<bots_arg_size; jj++)
            if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
            {
               fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
            }
         /* Column panel (bdiv). */
#pragma omp for
         for (ii=kk+1; ii<bots_arg_size; ii++)
            if (BENCH[ii*bots_arg_size+kk] != NULL)
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
            {
               bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
            }
         /* Trailing submatrix update; fill-in blocks allocated lazily
          * inside the task that first needs them. */
#pragma omp for private(jj)
         for (ii=kk+1; ii<bots_arg_size; ii++)
            if (BENCH[ii*bots_arg_size+kk] != NULL)
               for (jj=kk+1; jj<bots_arg_size; jj++)
                  if (BENCH[kk*bots_arg_size+jj] != NULL)
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
                  {
                     if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                     bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                  }
      }
   }
   bots_message(" completed!\n");
}
void sparselu_fini (float **BENCH, char *pass)
{
   /* Benchmark teardown hook: prints the final block structure.
    * NOTE(review): neither the blocks nor the BENCH table are freed here
    * — confirm the benchmark harness tolerates the leak. */
   print_structure(pass, BENCH);
}
int sparselu_check(float **SEQ, float **BENCH)
{
   /* Compare sequential and parallel results block by block.  The NULL
    * pattern must match exactly and every allocated block must pass
    * checkmat().  Scanning stops at the first mismatch. */
   int row, col;
   int ok = 1;
   for (row = 0; row < bots_arg_size && ok; row++)
   {
      for (col = 0; col < bots_arg_size && ok; col++)
      {
         float *s = SEQ[row*bots_arg_size+col];
         float *b = BENCH[row*bots_arg_size+col];
         if ((s == NULL) != (b == NULL))
            ok = FALSE;                 /* structure mismatch */
         else if (s != NULL)
            ok = checkmat(s, b);        /* both allocated: compare values */
      }
   }
   return ok ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
ast-dump-openmp-parallel.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // AST shape is pinned by the CHECK lines below; line/column positions must stay stable
#pragma omp parallel
  ; // NullStmt: the parallel region's structured block is intentionally empty
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel.c:4:1) *const restrict'
|
GB_unop__ainv_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_fc32_fc32)
// op(A') function: GB (_unop_tran__ainv_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_FC32_ainv (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC32_ainv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_FC32_ainv (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    // Apply the additive-inverse (ainv) unary op entrywise:
    // Cx [p] = GB_FC32_ainv (Ax [p]) for all anz entries.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        // (dead here: GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for ainv)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;            // cast is a no-op (same type)
            Cx [p] = GB_FC32_ainv (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;         // entry not present in the bitmap
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_ainv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = ainv (A'): the whole kernel body is generated by the shared
    // template GB_unop_transpose.c, specialized via the GB_* macros above.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_for_nowait.c | <ompts:test>
<ompts:description>Test which checks the omp parallel for nowait directive. It fills an array with values and operates on these in the following.</ompts:description>
<ompts:directive>omp parallel for nowait</ompts:directive>
<ompts:version>1.0</ompts:version>
<ompts:dependences>omp parallel for, omp flush</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int <ompts:testcode:functionname>omp_for_nowait</ompts:testcode:functionname> (FILE * logFile)
{
    /* Checks 'omp for nowait': the thread owning iteration 0 sleeps in the
     * first worksharing loop before setting count = 1.  With nowait, other
     * threads skip the loop-end barrier, reach the second loop while count
     * is still 0 and set result = 1; without nowait the implicit barrier
     * guarantees count == 1 before the second loop starts. */
	<ompts:orphan:vars>
	int result;
	int count;
	</ompts:orphan:vars>
	int j;                     /* NOTE(review): j and myarray are unused */
	int myarray[LOOPCOUNT];
	result = 0;
	count = 0;
#pragma omp parallel
	{
	<ompts:orphan>
	int rank;
	int i;
	rank = omp_get_thread_num();
#pragma omp for <ompts:check>nowait</ompts:check>
	for (i = 0; i < LOOPCOUNT; i++) {
	    if (i == 0) {
		/* Owner of iteration 0: sleep, then raise the flag. */
		fprintf (logFile, "Thread nr %d entering for loop and going to sleep.\n", rank);
		my_sleep(SLEEPTIME);
		count = 1;
#pragma omp flush(count)
		fprintf (logFile, "Thread nr %d woke up and set count = 1.\n", rank);
	    }
	}
	fprintf (logFile, "Thread nr %d exited first for loop and enters the second.\n", rank);
#pragma omp for
	for (i = 0; i < LOOPCOUNT; i++)
	{
#pragma omp flush(count)
	    /* count == 0 here means a thread did not wait at the first
	     * loop's end — the expected outcome when nowait is honored. */
	    if (count == 0)
		result = 1;
	}
	</ompts:orphan>
	}
	return result;
}
</ompts:testcode>
</ompts:test>
|
measure.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
int collapse_cluster(FILE *input_fptr, FILE *output_fptr, int rank, int subcircuit_idx, int num_instance, int cluster_circ_size, int **correspondece_map, int num_effective_qubits, int num_collapsed);
float* measure_instance(int subcircuit_circ_size, char** meas, float *unmeasured_prob, int **correspondece_map, int num_effective);
void measure(char *eval_folder, int subcircuit_idx, int num_eval_files, int *eval_files, int rank);
int** effective_full_state_correspondence(int cluster_circ_size, char **meas);
int* decToBinary(int num, int num_digits);
int binaryToDec(int *bin_num, int num_digits);
void print_int_arr(int *arr, int num_elements);
void print_float_arr(float *arr, int num_elements);
int search_element(int *arr, int arr_size, int element);
int combine_effective_O_state(int *bin_effective_state, int num_effective_qubits, int *bin_O_state, int num_O_qubits, int *O_qubit_positions);
float print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank,int subcircuit_idx);
double get_sec();
int main(int argc, char** argv) {
    /* Usage: measure <rank> <eval_folder> <full_circ_size> <subcircuit_idx>
     *                <num_eval_files> <file_id_0> ... <file_id_{n-1}>
     * FIX: argc and the calloc result were previously used unchecked,
     * so a short command line crashed with a NULL/OOB dereference. */
    if (argc < 6) {
        fprintf(stderr,
            "Usage: %s rank eval_folder full_circ_size subcircuit_idx num_eval_files file_ids...\n",
            argv[0]);
        return 1;
    }
    int rank = atoi(argv[1]);
    char *eval_folder = argv[2];
    int full_circ_size = atoi(argv[3]);   /* parsed but not used below */
    (void) full_circ_size;
    int subcircuit_idx = atoi(argv[4]);
    int num_eval_files = atoi(argv[5]);
    if (num_eval_files < 0 || argc < 6 + num_eval_files) {
        fprintf(stderr, "Error: expected %d eval file ids\n", num_eval_files);
        return 1;
    }
    int *eval_files = calloc(num_eval_files, sizeof(int));
    if (eval_files == NULL) {
        fprintf(stderr, "Error: out of memory\n");
        return 1;
    }
    int i;
    for (i = 0; i < num_eval_files; i++) {
        eval_files[i] = atoi(argv[6 + i]);
    }
    measure(eval_folder, subcircuit_idx, num_eval_files, eval_files, rank);
    free(eval_files);
    return 0;
}
void measure(char *eval_folder, int subcircuit_idx, int num_eval_files, int *eval_files, int rank) {
    /* Post-process a batch of raw subcircuit evaluation files: read each
     * "raw_<subcircuit>_<id>.txt", collapse the traced-out qubits with
     * measure_instance(), write "measured_<subcircuit>_<id>.txt", delete
     * the raw file, and finally write a per-rank timing summary.
     * NOTE(review): fopen/fscanf results are never checked — a missing or
     * malformed input file leads to undefined behavior; confirm inputs are
     * always produced by the matching evaluator. */
    /* Peek at the first file only to learn the measurement basis meas[];
     * it is assumed identical for every file in the batch — TODO confirm. */
    char *eval_file = malloc(256*sizeof(char));
    sprintf(eval_file, "%s/raw_%d_%d.txt", eval_folder, subcircuit_idx, eval_files[0]);
    FILE* eval_fptr = fopen(eval_file, "r");
    int subcircuit_circ_size, num_effective;
    fscanf(eval_fptr, "d=%d effective=%d\n", &subcircuit_circ_size,&num_effective);
    char *init[subcircuit_circ_size], *meas[subcircuit_circ_size];
    int qubit_ctr;
    for (qubit_ctr=0;qubit_ctr<subcircuit_circ_size;qubit_ctr++) {
        init[qubit_ctr] = malloc(16*sizeof(char));
        fscanf(eval_fptr, "%s ", init[qubit_ctr]);
    }
    for (qubit_ctr=0;qubit_ctr<subcircuit_circ_size;qubit_ctr++) {
        meas[qubit_ctr] = malloc(16*sizeof(char));
        fscanf(eval_fptr, "%s ", meas[qubit_ctr]);
    }
    free(eval_file);
    fclose(eval_fptr);
    /* Precompute the (effective_state, O_state) -> full-state index map
     * once; it is reused for every file in the batch. */
    int **correspondece_map = effective_full_state_correspondence(subcircuit_circ_size, meas);
    int eval_file_ctr;
    double total_measure_time = 0;
    double log_time = 0;
    for (eval_file_ctr=0;eval_file_ctr<num_eval_files;eval_file_ctr++) {
        double measure_begin = get_sec();
        char *eval_file = malloc(256*sizeof(char));
        sprintf(eval_file, "%s/raw_%d_%d.txt", eval_folder, subcircuit_idx, eval_files[eval_file_ctr]);
        FILE* eval_fptr = fopen(eval_file, "r");
        char line[256];   /* NOTE(review): unused */
        int subcircuit_circ_size, num_effective;
        fscanf(eval_fptr, "d=%d effective=%d\n", &subcircuit_circ_size,&num_effective);
        char *init[subcircuit_circ_size], *meas[subcircuit_circ_size];
        int qubit_ctr;
        for (qubit_ctr=0;qubit_ctr<subcircuit_circ_size;qubit_ctr++) {
            init[qubit_ctr] = malloc(16*sizeof(char));
            fscanf(eval_fptr, "%s ", init[qubit_ctr]);
        }
        for (qubit_ctr=0;qubit_ctr<subcircuit_circ_size;qubit_ctr++) {
            meas[qubit_ctr] = malloc(16*sizeof(char));
            fscanf(eval_fptr, "%s ", meas[qubit_ctr]);
        }
        /* Read the 2^d unmeasured probabilities for this instance. */
        long long int state_ctr;
        long long int unmeasured_len = (long long int) pow(2,subcircuit_circ_size);
        float *unmeasured_prob = malloc(unmeasured_len*sizeof(float));
        for (state_ctr=0;state_ctr<unmeasured_len;state_ctr++){
            fscanf(eval_fptr, "%f ", &unmeasured_prob[state_ctr]);
        }
        float* measured_prob = measure_instance(subcircuit_circ_size,meas,unmeasured_prob,correspondece_map,num_effective);
        remove(eval_file);   /* the raw input is consumed and deleted */
        free(eval_file);
        fclose(eval_fptr);
        /* Write the 2^num_effective collapsed probabilities. */
        long long int num_effective_states = (long long int) pow(2,num_effective);
        char *meas_file = malloc(256*sizeof(char));
        sprintf(meas_file, "%s/measured_%d_%d.txt", eval_folder, subcircuit_idx, eval_files[eval_file_ctr]);
        FILE *meas_fptr = fopen(meas_file, "w");
        for (state_ctr=0;state_ctr<num_effective_states;state_ctr++) {
            fprintf(meas_fptr,"%e ",measured_prob[state_ctr]);
        }
        free(meas_file);
        fclose(meas_fptr);
        /* NOTE(review): init[]/meas[] strings, unmeasured_prob and (when
         * num_effective < d) measured_prob are never freed — per-file leak. */
        log_time += get_sec() - measure_begin;
        total_measure_time += get_sec() - measure_begin;
        // NOTE: log_frequency is hard coded here
        log_time = print_log(log_time,total_measure_time,eval_file_ctr+1,num_eval_files,300,rank,subcircuit_idx);
    }
    char *summary_file = malloc(256*sizeof(char));
    sprintf(summary_file, "%s/rank_%d_summary.txt", eval_folder, rank);
    FILE *summary_fptr = fopen(summary_file, "w");
    fprintf(summary_fptr,"Total measure time = %e\n",total_measure_time);
    fprintf(summary_fptr,"measure DONE\n");
    free(summary_file);
    fclose(summary_fptr);
    return;
}
float* measure_instance(int subcircuit_circ_size, char** meas, float *unmeasured_prob, int **correspondece_map, int num_effective) {
    /* Collapse the non-computational (O) qubits of one probability vector.
     * For each effective state, sum the signed contributions of all full
     * states that map to it; the sign flips for every |1> bit on a qubit
     * whose basis is neither "I" nor "comp".
     * Returns unmeasured_prob itself when nothing needs collapsing,
     * otherwise a freshly calloc'd vector of length 2^num_effective
     * (caller owns it).
     * FIX: the bin_full_state buffer from decToBinary() was leaked on
     * every inner iteration; the calloc result was also unchecked. */
    int num_O_qubits = subcircuit_circ_size - num_effective;
    if (num_effective == subcircuit_circ_size) {
        /* All qubits are computational: nothing to trace out. */
        return unmeasured_prob;
    }
    else {
        long long int measured_len = (long long int) pow(2, num_effective);
        float *measured_prob = calloc(measured_len, sizeof(float));
        if (measured_prob == NULL) {
            printf("Out of memory in measure_instance\n");
            exit(1);
        }
        long long int measured_state_ctr;
        int num_O_states = (int) pow(2, num_O_qubits);   /* loop-invariant: hoisted */
        for (measured_state_ctr = 0; measured_state_ctr < measured_len; measured_state_ctr++) {
            int O_state_ctr;
            for (O_state_ctr = 0; O_state_ctr < num_O_states; O_state_ctr++) {
                int full_state = correspondece_map[measured_state_ctr][O_state_ctr];
                int *bin_full_state = decToBinary(full_state, subcircuit_circ_size);
                int sigma = 1;
                int qubit_ctr;
                for (qubit_ctr = 0; qubit_ctr < subcircuit_circ_size; qubit_ctr++) {
                    /* |1> on a non-trivially measured qubit flips the sign. */
                    if (bin_full_state[qubit_ctr]==1 && strcmp(meas[subcircuit_circ_size-1-qubit_ctr],"I")!=0 && strcmp(meas[subcircuit_circ_size-1-qubit_ctr],"comp")!=0) {
                        sigma *= -1;
                    }
                }
                free(bin_full_state);   /* FIX: was leaked every iteration */
                measured_prob[measured_state_ctr] += sigma*unmeasured_prob[full_state];
            }
            /* A probability-like sum far above 1 indicates corrupt input.
             * NOTE(review): exit(0) signals success to the shell — confirm
             * whether a nonzero status is intended. */
            if (measured_prob[measured_state_ctr]>10) {
                printf("Something Wrong\n");
                exit(0);
            }
        }
        return measured_prob;
    }
}
int** effective_full_state_correspondence(int cluster_circ_size, char **meas) {
    /* Build map[effective_state][O_state] -> full basis-state index.
     * Qubits whose basis string is "comp" are effective (computational);
     * all others are O qubits, recorded by position in O_qubit_positions.
     * Returns a malloc'd table of 2^num_effective rows x 2^num_O columns;
     * caller owns all rows and the table.
     * FIX: the temporary bit arrays returned by decToBinary() were leaked
     * for every state; malloc results were also unchecked. */
    int num_effective_qubits = 0;
    int num_O_qubits = 0;
    int qubit_ctr;
    int O_qubit_positions[cluster_circ_size];
    for (qubit_ctr=0;qubit_ctr<cluster_circ_size;qubit_ctr++) {
        if (strcmp(meas[qubit_ctr],"comp")==0) {
            num_effective_qubits++;
        }
        else {
            O_qubit_positions[num_O_qubits] = qubit_ctr;
            num_O_qubits++;
        }
    }
    int num_O_states = (int) pow(2,num_O_qubits);
    int num_effective_states = (int) pow(2,num_effective_qubits);
    int effective_state;
    int **correspondece_map = (int **)malloc(sizeof(int *)*num_effective_states);
    if (correspondece_map == NULL) {
        printf("Out of memory in effective_full_state_correspondence\n");
        exit(1);
    }
    for (effective_state=0;effective_state<num_effective_states;effective_state++) {
        int *bin_effective_state = decToBinary(effective_state, num_effective_qubits);
        int O_state;
        correspondece_map[effective_state]=(int *)malloc(sizeof(int)*num_O_states);
        if (correspondece_map[effective_state] == NULL) {
            printf("Out of memory in effective_full_state_correspondence\n");
            exit(1);
        }
        for (O_state=0;O_state<num_O_states;O_state++) {
            int *bin_O_state = decToBinary(O_state, num_O_qubits);
            int full_state = combine_effective_O_state(bin_effective_state, num_effective_qubits, bin_O_state, num_O_qubits, O_qubit_positions);
            correspondece_map[effective_state][O_state] = full_state;
            free(bin_O_state);           /* FIX: was leaked */
        }
        free(bin_effective_state);       /* FIX: was leaked */
    }
    return correspondece_map;
}
int* decToBinary(int num, int num_digits) {
    /* Return a malloc'd array of num_digits bits of num, most significant
     * bit first.  Caller owns the returned buffer.
     * FIX: the malloc result was written to unchecked; now returns NULL
     * on allocation failure. */
    int *bin = malloc(num_digits*sizeof(int));
    if (bin == NULL) {
        return NULL;
    }
    int i;
    for (i = num_digits - 1; i >= 0; i--) {
        /* Bit i of num lands at index (num_digits-1-i): MSB-first order. */
        bin[num_digits - 1 - i] = (num >> i) & 1;
    }
    return bin;
}
int binaryToDec(int *bin_num, int num_digits) {
    /* Interpret an MSB-first bit array as a non-negative integer.
     * Entries other than exactly 1 are treated as 0 bits. */
    int dec = 0;
    int i;
    for (i = 0; i < num_digits; i++) {
        dec = (dec << 1) | (bin_num[i] == 1);
    }
    return dec;
}
void print_int_arr(int *arr, int num_elements) {
    /* Print an int array; arrays longer than 10 entries are abbreviated
     * to the first five and last five separated by " ... ". */
    int head = num_elements;
    int tail_start = num_elements;   /* no tail segment by default */
    if (num_elements > 10) {
        head = 5;
        tail_start = num_elements - 5;
    }
    int i;
    for (i = 0; i < head; i++) {
        printf("%d ", arr[i]);
    }
    if (tail_start < num_elements) {
        printf(" ... ");
        for (i = tail_start; i < num_elements; i++) {
            printf("%d ", arr[i]);
        }
    }
    printf(" = %d elements\n", num_elements);
}
void print_float_arr(float *arr, int num_elements) {
    /* Print a float array in %e format; arrays longer than 10 entries are
     * abbreviated to the first five and last five separated by " ... ". */
    int head = num_elements;
    int tail_start = num_elements;   /* no tail segment by default */
    if (num_elements > 10) {
        head = 5;
        tail_start = num_elements - 5;
    }
    int i;
    for (i = 0; i < head; i++) {
        printf("%e ", arr[i]);
    }
    if (tail_start < num_elements) {
        printf(" ... ");
        for (i = tail_start; i < num_elements; i++) {
            printf("%e ", arr[i]);
        }
    }
    printf(" = %d elements\n", num_elements);
}
int search_element(int *arr, int arr_size, int element) {
    /* Linear scan: return the first index holding element, or -1. */
    int idx = 0;
    while (idx < arr_size) {
        if (arr[idx] == element) {
            return idx;
        }
        idx++;
    }
    return -1;
}
int combine_effective_O_state(int *bin_effective_state, int num_effective_qubits, int *bin_O_state, int num_O_qubits, int *O_qubit_positions) {
// printf("effective_state : ");
// print_int_arr(bin_effective_state,num_effective_qubits);
// printf(", inserting O_state ");
// print_int_arr(bin_O_state,num_O_qubits);
// printf(" at O positions ");
// print_int_arr(O_qubit_positions,num_O_qubits);
// printf("\n");
int bin_full_state[num_effective_qubits+num_O_qubits];
int full_state_ctr;
int effective_state_ctr = 0;
int O_state_ctr = 0;
for (full_state_ctr=0;full_state_ctr<num_effective_qubits+num_O_qubits;full_state_ctr++) {
int O_qubit_position = search_element(O_qubit_positions, num_O_qubits, full_state_ctr);
if (O_qubit_position==-1) {
bin_full_state[num_effective_qubits+num_O_qubits-1-full_state_ctr] = bin_effective_state[num_effective_qubits - 1 - effective_state_ctr];
effective_state_ctr++;
}
else {
bin_full_state[num_effective_qubits+num_O_qubits-1-full_state_ctr] = bin_O_state[O_qubit_position];
}
}
int full_state = binaryToDec(bin_full_state,num_effective_qubits+num_O_qubits);
// printf("Full state:");
// print_int_arr(bin_full_state,num_effective_qubits+num_O_qubits);
// printf(" --> %d\n",full_state);
return full_state;
}
float print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank,int subcircuit_idx) {
    /* Throttled progress reporting: once the accumulated log_time exceeds
     * log_frequency seconds, print a progress line with a linear ETA and
     * reset the accumulator to 0; otherwise return log_time unchanged.
     * NOTE(review): the return type is float but both returned values are
     * doubles — precision is silently truncated at each call; confirm
     * whether the return type should be double. */
    if (log_time>log_frequency) {
        double eta = elapsed_time/num_finished_jobs*num_total_jobs - elapsed_time;
        printf("Meas_rank %d measured subcircuit %d %d/%d, elapsed = %e, ETA = %e\n",rank,subcircuit_idx,num_finished_jobs,num_total_jobs,elapsed_time,eta);
        return 0;
    }
    else {
        return log_time;
    }
}
double get_sec() {
    /* Wall-clock time in seconds since the epoch, with microsecond
     * resolution (gettimeofday). */
    struct timeval now;
    gettimeofday(&now, NULL);
    return (now.tv_sec + 1e-6 * now.tv_usec);
}
|
GB_unop__identity_bool_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_int32)
// op(A') function: GB (_unop_tran__identity_bool_int32)
// C type: bool
// A type: int32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_bool_int32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    // Apply the identity op with an int32 -> bool typecast:
    // Cx [p] = (bool) Ax [p] for all anz entries.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            bool z = (bool) aij ;   // any nonzero int32 becomes true
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // entry not present in the bitmap
            int32_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_bool_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the transpose template uses the GB_* macros defined above
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
irbuilder_unroll_partial_heuristic_constant_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
// TODO: The unroll-factor heuristic might be able to use the information that the trip count is constant, but currently is not able to determine that.
#ifndef HEADER
#define HEADER
double sind(double);
// CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_constant_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[E_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[OFFSET_ADDR:.+]] = alloca float, align 4
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store float* %[[E:.+]], float** %[[E_ADDR]], align 8
// CHECK-NEXT: store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]]
// CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]]
// CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 4
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 4, %[[TMP12]]
// CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[CONV:.+]] = fpext float %[[TMP19]] to double
// CHECK-NEXT: %[[CALL:.+]] = call double @sind(double %[[CONV]])
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[CONV4:.+]] = fpext float %[[TMP22]] to double
// CHECK-NEXT: %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]]
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM5:.+]] = sext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM5]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX6]], align 4
// CHECK-NEXT: %[[CONV7:.+]] = fpext float %[[TMP25]] to double
// CHECK-NEXT: %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]]
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[E_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM9:.+]] = sext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM9]]
// CHECK-NEXT: %[[TMP28:.+]] = load float, float* %[[ARRAYIDX10]], align 4
// CHECK-NEXT: %[[CONV11:.+]] = fpext float %[[TMP28]] to double
// CHECK-NEXT: %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]]
// CHECK-NEXT: %[[TMP29:.+]] = load float, float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: %[[CONV13:.+]] = fpext float %[[TMP29]] to double
// CHECK-NEXT: %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]]
// CHECK-NEXT: %[[TMP30:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP31:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM14:.+]] = sext i32 %[[TMP31]] to i64
// CHECK-NEXT: %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP30]], i64 %[[IDXPROM14]]
// CHECK-NEXT: %[[TMP32:.+]] = load float, float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: %[[CONV16:.+]] = fpext float %[[TMP32]] to double
// CHECK-NEXT: %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]]
// CHECK-NEXT: %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float
// CHECK-NEXT: store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Worksharing loop combined with heuristic partial unrolling; the IR above
// (autogenerated CHECK lines) pins the chosen unroll factor of 4. Do not
// change the loop body: the CHECK lines would need regeneration.
void unroll_partial_heuristic_constant_for(float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for
#pragma omp unroll partial
for (int i = 0; i < 128; i++) {
a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
}
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 128, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP8]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP9:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP9]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
|
ctl_scroll.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : ctl_scroll.c
* Description : scroll control
*
* + This is part of libaroma, an embedded ui toolkit.
* + 12/02/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_ctl_scroll_c__
#define __libaroma_ctl_scroll_c__
#include <aroma_internal.h>
#include "../ui/ui_internal.h"
/* HANDLER */
dword _libaroma_ctl_scroll_msg(LIBAROMA_CONTROLP, LIBAROMA_MSGP);
void _libaroma_ctl_scroll_draw (LIBAROMA_CONTROLP, LIBAROMA_CANVASP);
void _libaroma_ctl_scroll_destroy(LIBAROMA_CONTROLP);
byte _libaroma_ctl_scroll_thread(LIBAROMA_CONTROLP);
/* Control-handler vtable for the scroll control.
 * Rewritten with standard C99 designated initializers; the previous
 * `field:` form is an obsolete GCC extension. `focus` is intentionally
 * NULL (no focus callback for this control). */
static LIBAROMA_CONTROL_HANDLER _libaroma_ctl_scroll_handler={
  .message = _libaroma_ctl_scroll_msg,
  .draw    = _libaroma_ctl_scroll_draw,
  .focus   = NULL,
  .destroy = _libaroma_ctl_scroll_destroy,
  .thread  = _libaroma_ctl_scroll_thread
};
/*
* SCROLL CONTROL BEHAVIOUR CONFIGURATIONS
*
*/
/* maximum height of the client cache canvas, in pixels */
#define _LIBAROMA_CTL_SCROLL_MAX_CACHE (libaroma_fb()->h * 10)
/* size of the scrollbar touch handle, in dp */
#define _LIBAROMA_CTL_SCROLL_HANDLE_DP 36
/* milliseconds to wait before forwarding a touch-down event to the client */
#define _LIBAROMA_CTL_SCROLL_TOUCH_CLIENT_WAIT 120
/* minimal touch y-move in dp to start scrolling when the client requests
 * touch messages */
#define _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP 24
/* minimal touch y-move in dp to start scrolling when the client doesn't
 * request touch messages */
#define _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP_NOITEM 5
/* optional features, disabled by default — uncomment to enable:
#define LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE 1
#define LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD 1
*/
/*
* Structure : __LIBAROMA_CTL_SCROLL
* Typedef : _LIBAROMA_CTL_SCROLL, * _LIBAROMA_CTL_SCROLLP
* Descriptions: scroll control internal structure
*/
typedef struct __LIBAROMA_CTL_SCROLL _LIBAROMA_CTL_SCROLL;
typedef struct __LIBAROMA_CTL_SCROLL * _LIBAROMA_CTL_SCROLLP;
struct __LIBAROMA_CTL_SCROLL{
/* drawing & canvas */
LIBAROMA_CANVASP client_canvas;  /* cached client drawing (used as a wrap-around cache) */
word color_bg;                   /* background color */
byte flags;                      /* LIBAROMA_CTL_SCROLL_* option flags */
/* threads */
byte active;                     /* nonzero while the control is alive; threads poll it */
#ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD
LIBAROMA_THREAD cache_thread;    /* optional background cache updater */
#endif
LIBAROMA_THREAD calc_thread;     /* background calculation thread */
/* scrolling values */
int request_new_height;          /* pending client-height change; -1 = none */
int scroll_y;                    /* current scroll position */
int client_h;                    /* total client height */
int max_scroll_y;                /* maximum scroll position */
int request_scroll_y;            /* animated scroll target; -1 = none */
long scroll_tick;                /* tick of last scroll (drives indicator fade) */
int scroll_state;                /* indicator opacity state, 0..256 */
/* cache values */
byte cache_state;                /* 10/11 = force full client redraw */
byte move_state;                 /* scroll direction hint (1 = up, 2 = down) */
int cache_y;                     /* offset of cache start inside client_canvas */
int draw_y;                      /* client y represented at cache start */
int synced_y;                    /* last scroll_y drawn; -1 forces redraw */
long scroll_handle_time;         /* tick used for indicator fade-in */
/* touch event */
byte touched;                    /* touch currently down */
byte handle_touched;             /* scrollbar handle is being dragged */
byte allow_scroll;               /* touch may become a scroll gesture */
int touch_x;                     /* touch-down x */
int touch_y;                     /* touch-down y */
int touch_scroll_y;              /* scroll_y at touch-down */
/* client touch event */
long client_touch_start;         /* tick when a deferred client touch began; 0 = none */
byte client_touched;             /* client received a touch-down message */
/* overshoot */
byte ovs_bounce;                 /* overshoot/bounce mode (1 or 2) */
long ovs_start;                  /* overshoot animation start tick */
float ovs_state;                 /* overshoot animation progress, 0..1 */
float ovs_ustate;                /* overshoot release progress, 0..1 */
long ovs_ustart;                 /* overshoot release start tick (1 = armed) */
int ovs_x;                       /* overshoot origin x */
int ovs_y;                       /* overshoot offset/origin y */
LIBAROMA_CTL_SCROLL_OVERSHOOT_EFFECT ovs_custom_cb;  /* custom overshoot renderer */
/* fling items */
int bounce_velocity;             /* edge-bounce velocity (fixed point, >>8) */
int velocity;                    /* fling velocity (fixed point, >>8) */
LIBAROMA_FLING fling;            /* fling gesture tracker */
/* client data */
LIBAROMA_CTL_SCROLL_CLIENT client;  /* client handler + data */
LIBAROMA_MUTEX mutex;            /* general state mutex */
LIBAROMA_MUTEX fmutex;           /* cache/height update mutex */
LIBAROMA_MUTEX blitmutex;        /* blit mutex */
LIBAROMA_COND_MUTEX cmutex;      /* condvar mutex for calc thread wakeup */
LIBAROMA_COND ccond;             /* condvar signaled by the control thread */
/* minscroll handler */
LIBAROMA_CTL_SCROLL_MINSCROLL_HANDLER minscroll_cb;  /* pinned-header renderer */
int minscroll_y;                 /* pinned-header height in px */
};
/*
* Function : _libaroma_ctl_scroll_client_msg
* Return Value: dword
* Descriptions: send client message
*/
dword _libaroma_ctl_scroll_client_msg(
  LIBAROMA_CONTROLP ctl, byte message, int x, int y
){
  /* Forward a LIBAROMA_CTL_SCROLL_MSG to the client handler, passing the
   * (x,y) coordinates through, and return the client's reply.
   * Returns 0 when there is no client handler or message callback. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  /* guard the handler pointer itself — every other function in this file
   * tests me->client.handler before dereferencing it; the original code
   * only tested handler->message and would crash on a NULL handler */
  if ((me->client.handler)&&(me->client.handler->message)){
    LIBAROMA_MSG msgc;
    libaroma_wm_compose(
      &msgc,
      LIBAROMA_CTL_SCROLL_MSG,
      NULL,
      message,
      0
    );
    return me->client.handler->message(
      ctl, &me->client, &msgc, x, y
    );
  }
  return 0;
} /* End of _libaroma_ctl_scroll_client_msg */
/*
* Function : _libaroma_ctl_scroll_updatecache
* Return Value: byte
* Descriptions: update cache drawing
*/
byte _libaroma_ctl_scroll_updatecache(LIBAROMA_CONTROLP ctl, int move_sz){
/* Shift the wrap-around client cache by move_sz pixels (negative = toward the
 * top) and ask the client to redraw the newly exposed strip(s). Returns 1 if
 * the cache changed, 0 otherwise. Holds fmutex for the whole update. */
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
);
if (me->client_canvas==NULL){
return 0;
}
libaroma_mutex_lock(me->fmutex);
int move_value=0;
int cvhsz = (me->client_canvas->h / 2);
if (move_sz<0){
/* draw top: move at least half a canvas upward, clamped at client top */
move_value = 0-cvhsz;
if (move_value>move_sz){
move_value=move_sz;
}
if (me->draw_y+move_value<0){
move_value=0-me->draw_y;
}
}
else if (move_sz>0){
/* draw bottom: move at least half a canvas downward, clamped at max */
move_value = cvhsz;
if (move_value<move_sz){
move_value=move_sz;
}
if (me->draw_y+move_value>me->max_scroll_y){
move_value=me->max_scroll_y-me->draw_y;
}
if (move_value<0){
move_value=0;
}
}
if ((me->cache_state==10)||(me->cache_state==11)){
/* cache_state 10/11 requests a full redraw of the whole cache canvas */
me->cache_state=0;
int client_y=me->draw_y;
/* force redraw all */
if (me->client.handler->draw!=NULL){
me->client.handler->draw(
ctl, &me->client, me->client_canvas,
0,client_y,me->client_canvas->w,me->client_canvas->h
);
}
else{
libaroma_canvas_setcolor(me->client_canvas,me->color_bg,0xff);
}
me->cache_y=0;
me->synced_y=-1; /* force the next draw to re-blit */
libaroma_mutex_unlock(me->fmutex);
return 1;
}
me->cache_state=0;
if (move_value!=0){
byte is_top = (move_value<0)?1:0;
int cache_h = abs(move_value);
int cache_y = me->cache_y+move_value;
int client_y= me->draw_y+(is_top?move_value:me->client_canvas->h);
/* wrap the cache offset into [0, canvas_h) */
if (cache_y<0){
cache_y = me->client_canvas->h + cache_y;
}
else if (cache_y>=me->client_canvas->h){
cache_y = cache_y-me->client_canvas->h;
}
/* redrawing client: the strip may wrap around the canvas end, so it is
 * split into a "top" and a "bottom" section */
LIBAROMA_CANVASP redraw_canvas;
int top_y=is_top?cache_y:cache_y-cache_h;
int top_h=cache_h;
int bottom_h=0;
if (top_y<0){
top_h = abs(top_y);
bottom_h = cache_h - top_h;
top_y = me->client_canvas->h-top_h;
}
else if (top_y+top_h>me->client_canvas->h){
top_h = me->client_canvas->h - top_y;
bottom_h = cache_h - top_h;
}
/* top section */
if (top_h>0){
redraw_canvas = libaroma_canvas_area(
me->client_canvas, 0, top_y, me->client_canvas->w, top_h
);
if (me->client.handler->draw){
me->client.handler->draw(
ctl, &me->client, redraw_canvas,
0, client_y, redraw_canvas->w, redraw_canvas->h
);
}
else{
libaroma_canvas_setcolor(redraw_canvas,me->color_bg,0xff);
}
libaroma_canvas_free(redraw_canvas);
}
/* bottom section (the wrapped remainder, starting at canvas row 0) */
if (bottom_h>0){
redraw_canvas = libaroma_canvas_area(
me->client_canvas,
0, 0, me->client_canvas->w, bottom_h
);
if (me->client.handler->draw){
me->client.handler->draw(
ctl, &me->client, redraw_canvas,
0, client_y+top_h, redraw_canvas->w, redraw_canvas->h
);
}
else{
libaroma_canvas_setcolor(redraw_canvas,me->color_bg,0xff);
}
libaroma_canvas_free(redraw_canvas);
}
/* update info */
me->cache_y=cache_y;
me->draw_y+=move_value;
me->synced_y=-1;
libaroma_mutex_unlock(me->fmutex);
return 1;
}
libaroma_mutex_unlock(me->fmutex);
return 0;
} /* End of _libaroma_ctl_scroll_updatecache */
/*
* Function : _libaroma_ctl_scroll_check_update
* Return Value: byte
* Descriptions: check for cache update
*/
#ifdef LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE
byte _libaroma_ctl_scroll_check_update(LIBAROMA_CONTROLP ctl){
/* Decide whether the cache needs shifting toward the current scroll position
 * and trigger an update of a quarter-canvas step. Returns 1 if an update was
 * issued, 0 otherwise. Only compiled with LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE. */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
);
if ((me->client.handler)&&(me->client_canvas!=NULL)){
/* cache_state 10 means a full redraw is already pending — skip */
if ((me->cache_state)&&(me->cache_state!=10)){
int cvhsz = (me->client_canvas->h / 4);
int draw_top = me->draw_y;
int draw_bottom = draw_top+me->client_canvas->h;
if (me->scroll_y<draw_top+me->cache_y){
/* scrolled above the cached window — shift cache up */
_libaroma_ctl_scroll_updatecache(ctl,-cvhsz);
return 1;
}
else if (me->scroll_y>draw_bottom+me->cache_y){
/* scrolled below the cached window — shift cache down */
_libaroma_ctl_scroll_updatecache(ctl,cvhsz);
return 1;
}
else if (me->move_state==1){
/* moving up: prefetch when close to the cached top */
if ((me->scroll_y<draw_top+cvhsz)&&(draw_top>0)){
_libaroma_ctl_scroll_updatecache(ctl,-cvhsz);
return 1;
}
}
else if (me->move_state==2){
/* moving down: prefetch when close to the cached bottom */
if ((me->scroll_y>draw_bottom-cvhsz)&&(draw_bottom<me->client_h)){
_libaroma_ctl_scroll_updatecache(ctl,cvhsz);
return 1;
}
}
}
}
return 0;
} /* End of _libaroma_ctl_scroll_check_update */
#endif
/*
* Function : _libaroma_ctl_scroll_cache_thread
* Return Value: static void *
* Descriptions: background cache updater
*/
#ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD
static void * _libaroma_ctl_scroll_cache_thread(void * cookie){
/* Background thread: polls for pending height changes and cache-update
 * requests while the control is active. cookie is the owning control. */
LIBAROMA_CONTROLP ctl = (LIBAROMA_CONTROLP) cookie;
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
);
ALOGV("Start scroll updater thread");
while (me->active){
/* update new height */
if (me->client.handler){
if (me->request_new_height!=-1){
/* apply pending height change, then clear the request under fmutex */
libaroma_ctl_scroll_set_height(ctl,me->request_new_height);
libaroma_mutex_lock(me->fmutex);
me->request_new_height=-1;
libaroma_mutex_unlock(me->fmutex);
}
if (me->cache_state==10){
/* full-redraw request */
_libaroma_ctl_scroll_updatecache(ctl, 0);
}
#ifdef LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE
else if (me->client_canvas!=NULL){
/* client taller than the cache — check if the cache window must move */
if ((me->client_h>me->client_canvas->h)&&(me->request_new_height==-1)){
_libaroma_ctl_scroll_check_update(ctl);
}
}
#endif
}
libaroma_sleep(1);
}
ALOGV("End scroll updater thread");
return NULL;
} /* End of _libaroma_ctl_scroll_cache_thread */
#endif
/*
* Function : _libaroma_ctl_scroll_calc_thread
* Return Value: static void *
* Descriptions: background calculation updater
*/
static void * _libaroma_ctl_scroll_calc_thread(void * cookie){
/* Background thread: sleeps on ccond (signaled by the control thread each
 * tick), delivers deferred touch-down messages to the client, and runs the
 * client's own thread callback. cookie is the owning control. */
LIBAROMA_CONTROLP ctl = (LIBAROMA_CONTROLP) cookie;
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
);
ALOGI("Start scroll calculation thread");
byte need_drawing = 0;
while (me->active){
/* NOTE(review): cond_wait without a predicate loop — spurious wakeups are
 * tolerated because the body re-checks all state each iteration */
libaroma_cond_lock(&me->cmutex);
libaroma_cond_wait(&me->ccond, &me->cmutex);
libaroma_cond_unlock(&me->cmutex);
if (!me->active){
break;
}
if (me->client.handler){
/* run client thread */
libaroma_mutex_lock(me->mutex);
/* deliver the touch-down to the client only after the debounce wait,
 * so short scroll gestures never reach the client */
if ((me->client_touch_start!=0)&&
(libaroma_tick()-me->client_touch_start>
_LIBAROMA_CTL_SCROLL_TOUCH_CLIENT_WAIT)){
me->client_touch_start=0;
/* send touch down message to client */
if (me->client.handler->message){
int client_x = me->touch_x;
int client_y = me->touch_y + me->scroll_y; /* to client coordinates */
if (_libaroma_ctl_scroll_client_msg(
ctl,LIBAROMA_CTL_SCROLL_MSG_TOUCH_DOWN,
client_x, client_y
)==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){
need_drawing=1;
}
me->client_touched=1;
}
}
libaroma_mutex_unlock(me->mutex);
/* client thread */
if (me->client.handler->thread!=NULL){
if (me->client.handler->thread(ctl,&me->client)){
need_drawing=1;
}
}
/* drawing handler: invalidate synced_y so the next draw re-blits */
if (need_drawing){
me->synced_y=-1;
need_drawing=0;
}
}
}
ALOGI("End scroll calculation thread");
return NULL;
} /* End of _libaroma_ctl_scroll_calc_thread */
/*
* Function : _libaroma_ctl_scroll_thread
* Return Value: byte
* Descriptions: control thread callback
*/
byte _libaroma_ctl_scroll_thread(LIBAROMA_CONTROLP ctl) {
/* Per-tick control callback: wakes the calc thread, then advances the
 * overshoot, fling, direct-scroll, bounce, and indicator animations.
 * Returns 1 when the control needs redrawing, 0 otherwise. */
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
);
byte need_drawing=0;
if (me->client.handler){
/* wake the calc thread for this tick */
libaroma_cond_lock(&me->cmutex);
libaroma_cond_signal(&me->ccond);
libaroma_cond_unlock(&me->cmutex);
if (!me->active){
return 0;
}
/* NOTE(review): with OpenMP enabled, the sections below all write
 * need_drawing concurrently; they only ever set it to 1, but this is
 * still a data race by the letter of the standard — confirm intent */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel sections
{
#pragma omp section
{
#endif
/* overshoot */
if (me->ovs_ustart>1){
/* release phase of the overshoot effect */
float nowstate=
libaroma_control_state(me->ovs_ustart,
me->ovs_bounce==2?1000:400);
if (nowstate<1){
if (nowstate!=me->ovs_ustate){
me->ovs_ustate=nowstate;
need_drawing=1;
}
}
if ((nowstate>=1)&&(me->ovs_ustate<1)){
/* release finished — reset all overshoot state */
me->ovs_state=0;
me->ovs_start=0;
me->ovs_ustart=0;
me->ovs_ustate=0;
need_drawing=1;
}
}
else if ((me->ovs_start>0)||(me->ovs_state)){
/* grow phase of the overshoot effect */
float nowstate=
libaroma_control_state(me->ovs_start,(me->ovs_bounce==1)?800:1600);
if (nowstate<1){
if (nowstate!=me->ovs_state){
me->ovs_state=nowstate;
need_drawing=1;
}
}
if ((me->ovs_state<1)&&((nowstate>=1)||
((nowstate>=0.2)&&(me->ovs_ustart==1)&&(me->ovs_state<1)))
){
/* grow finished (or release pre-armed) — start the release phase */
me->ovs_state=0.5;
me->ovs_ustart=libaroma_tick();
me->ovs_ustate=0;
if (!me->ovs_bounce){
me->ovs_bounce=2;
}
need_drawing=1;
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
/* fling handler */
if ((me->velocity!=0)&&(!me->touched)){
/* onfling: exponential decay, velocity is fixed point (>>8) */
me->velocity=(me->velocity*246)>>8;
if ((abs(me->velocity)<256)||(me->touched)) {
/* ended */
me->velocity = 0;
need_drawing=1;
}
else{
/* still on fling */
int scroll_y = (me->velocity>>8) + me->scroll_y;
if (scroll_y>=me->max_scroll_y){
/* hit the bottom edge — convert remaining velocity into a bounce */
scroll_y=me->max_scroll_y;
if (me->scroll_y!=scroll_y){
me->bounce_velocity=MAX(-libaroma_dp(3840),
MIN(libaroma_dp(3840),(me->velocity*153)>>8));
me->ovs_bounce=1;
me->ovs_state=0;
me->ovs_y=0;
/* NOTE(review): clamp uses ctl->w (width) for a vertical
 * offset — possibly intentional scaling, confirm */
me->ovs_y=MIN(ctl->w*0.4,me->bounce_velocity>>4);
me->ovs_ustate=0;
me->ovs_ustart=1;
me->ovs_start=libaroma_tick()-16;
}
me->velocity = 0;
need_drawing=1;
}
if (scroll_y<=0){
/* hit the top edge — convert remaining velocity into a bounce */
scroll_y=0;
if (me->scroll_y!=scroll_y){
me->bounce_velocity=MAX(-libaroma_dp(3840),
MIN(libaroma_dp(3840),(me->velocity*153)>>8));
me->ovs_bounce=1;
me->ovs_state=0;
me->ovs_y=0;
me->ovs_y=MAX(0-ctl->w*0.4,me->bounce_velocity>>4);
me->ovs_ustate=0;
me->ovs_ustart=1;
me->ovs_start=libaroma_tick()-16;
}
me->velocity = 0;
need_drawing=1;
}
if (scroll_y!=me->scroll_y){
libaroma_ctl_scroll_set_pos(ctl, scroll_y);
}
}
}
else if (me->request_scroll_y!=-1){
/* direct request: ease toward the requested position (-1 = no request) */
if (me->request_scroll_y!=me->scroll_y){
/* step is 1/4 of the remaining distance, minimum 1px */
int move_sz = ((me->request_scroll_y-me->scroll_y)*64)>>8;
if (abs(move_sz)<2){
if (move_sz<0){
move_sz=-1;
}
else{
move_sz=1;
}
}
int target_sz = me->scroll_y+move_sz;
if (target_sz==me->request_scroll_y){
target_sz=me->request_scroll_y;
me->request_scroll_y=-1; /* reached — clear the request */
}
libaroma_ctl_scroll_set_pos(ctl,target_sz);
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
/* bounce handler */
if (me->bounce_velocity!=0){
/* bounce: decay toward zero, stop below one pixel (fixed point >>8) */
me->bounce_velocity=(me->bounce_velocity*153)>>8;
if (abs(me->bounce_velocity)<256){
me->bounce_velocity=0;
}
need_drawing=1;
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
/* scroll indicator handler */
if (me->scroll_tick!=0){
/* scroll indicator */
if (!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR)){
long diff= libaroma_tick()-me->scroll_tick;
if ((diff>1000)&&(me->scroll_state>0)){
/* idle for >1s — fade the indicator out over 400ms */
int nowstate=round(256.0*(1.0-libaroma_control_state(
me->scroll_tick+1000,400)));
if (nowstate!=me->scroll_state){
me->scroll_state=nowstate;
need_drawing=1;
}
if (me->scroll_state<=0){
me->scroll_state=0;
me->scroll_tick=0;
me->scroll_handle_time=0;
}
}
else if ((diff<500)&&(me->scroll_state<256)){
/* recently scrolled — fade the indicator in over 400ms */
if (!me->scroll_handle_time){
me->scroll_handle_time=me->scroll_tick;
}
int nowstate=round(256.0*
libaroma_control_state(me->scroll_handle_time,400));
if (nowstate!=me->scroll_state){
me->scroll_state=nowstate;
need_drawing=1;
}
if (me->scroll_state>=256){
me->scroll_state=256;
}
}
}
else{
me->scroll_tick=0;
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
}
#endif
if (need_drawing){
me->synced_y=-1; /* invalidate so the draw callback re-blits */
}
if (me->request_new_height!=-1){
#ifndef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD
/* no cache thread — apply the pending height change here */
libaroma_ctl_scroll_set_height(ctl,me->request_new_height);
libaroma_mutex_lock(me->fmutex);
me->request_new_height=-1;
libaroma_mutex_unlock(me->fmutex);
#else
/* cache thread owns height updates — skip drawing this tick */
return 0;
#endif
}
#ifndef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD
if (me->cache_state==10){
/* full-redraw request, serviced inline without the cache thread */
_libaroma_ctl_scroll_updatecache(ctl, 0);
}
#endif
if (me->synced_y!=me->scroll_y){
return 1;
}
}
return 0;
} /* End of _libaroma_ctl_scroll_thread */
/*
* Function : _libaroma_ctl_scroll_draw
* Return Value: void
* Descriptions: draw callback
*/
void _libaroma_ctl_scroll_draw(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CANVASP c){
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP,
);
if (me->client.handler){
if (!me->active){
libaroma_mutex_lock(me->mutex);
if (me->request_new_height!=-1){
int nrq=me->request_new_height;
libaroma_mutex_unlock(me->mutex);
libaroma_ctl_scroll_set_height(ctl,nrq);
libaroma_mutex_lock(me->mutex);
me->request_new_height=-1;
libaroma_mutex_unlock(me->mutex);
}
else{
libaroma_mutex_unlock(me->mutex);
}
if (me->cache_state==10){
_libaroma_ctl_scroll_updatecache(ctl, 0);
}
}
if (me->client_canvas!=NULL){
libaroma_mutex_lock(me->mutex);
int scroll_y = me->scroll_y;
int draw_y = (scroll_y-me->draw_y+me->cache_y)%me->client_canvas->h;
int draw_h = ctl->h;
if (me->client_canvas->h<=ctl->h){
/* no scroll */
if ((me->minscroll_cb)&&(me->minscroll_y)){
LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y);
if (mscv){
libaroma_draw(mscv,me->client_canvas,0,0,0);
me->minscroll_cb(ctl, mscv, me->scroll_y);
libaroma_canvas_free(mscv);
}
}
libaroma_canvas_setcolor(c,me->color_bg,0xff);
libaroma_draw_ex(
c, me->client_canvas,
0,0,
0,me->minscroll_y,
me->client_canvas->w, me->client_canvas->h-me->minscroll_y,
0,0xff);
me->synced_y=me->scroll_y;
}
else{
if ((me->minscroll_cb)&&(me->minscroll_y)){
int draw_yv = ((scroll_y-me->minscroll_y)-me->draw_y+me->cache_y)
%me->client_canvas->h;
LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y);
if (mscv){
libaroma_draw_ex(
mscv,me->client_canvas,
0,0,
0,draw_yv,
mscv->w,mscv->h,
0,0xff
);
me->minscroll_cb(ctl, mscv, me->scroll_y);
libaroma_canvas_free(mscv);
}
}
LIBAROMA_CANVASP tc=c;
int bvel=me->bounce_velocity;
if (bvel!=0){
libaroma_canvas_setcolor(tc,me->color_bg,0xff);
c=libaroma_canvas(tc->w,tc->h);
}
if (draw_y<0){
draw_y=me->client_canvas->h+draw_y;
}
if (draw_y+draw_h>me->client_canvas->h){
int top_h = (me->client_canvas->h - draw_y);
int bottom_h = draw_h - top_h;
int bottom_y = 0;
if (top_h<1){
bottom_h+=top_h;
bottom_y=abs(top_h);
}
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel sections
{
#pragma omp section
{
#endif
if (top_h>0){
if (!libaroma_draw_ex(
c,me->client_canvas,
0,0,
0,draw_y,
c->w,top_h,
0,0xff
)){
ALOGV("Error top_h: %i,%i",draw_y,draw_h);
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
if (bottom_h>0){
if (!libaroma_draw_ex(
c,me->client_canvas,
0,top_h,
0,bottom_y,
c->w,bottom_h,
0,0xff
)){
ALOGV("Error bottom_h: %i,%i - %i",
bottom_y,bottom_h,c->h);
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
}
#endif
me->synced_y=me->scroll_y;
}
else if ((draw_y<me->client_canvas->h)&&(draw_y>=0)){
if (!libaroma_draw_ex(
c,me->client_canvas,
0,0,
0,draw_y,
c->w,draw_h,
0,0xff
)){
ALOGV("Error draw_h: %i,%i",draw_y,draw_h);
}
me->synced_y=me->scroll_y;
}
if (bvel!=0){
int y_i = (int) bvel>>8;
libaroma_draw_ex(tc,c,0,0,0,y_i,tc->w,tc->h,0,0xff);
libaroma_canvas_free(c);
c=tc;
}
}
libaroma_mutex_unlock(me->mutex);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel sections
{
#pragma omp section
{
#endif
if (me->active){
if ((!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR))&&
(me->max_scroll_y>me->minscroll_y)){
if ((me->scroll_state>0)||(me->handle_touched)){
int hdl_w,hdl_r,ctl_y,ctl_h;
byte handle_opa=180;
byte is_dark = libaroma_color_isdark(me->color_bg);
word indicator_color = is_dark?RGB(cccccc):RGB(666666);
int vss=(me->handle_touched)?256:me->scroll_state;
if (me->flags&LIBAROMA_CTL_SCROLL_WITH_HANDLE){
hdl_w = libaroma_dp(5);
hdl_r = hdl_w*2;
if (!me->handle_touched){
hdl_r=(hdl_r * me->scroll_state) >> 8;
}
/* track */
ctl_y = libaroma_dp(18);
ctl_h = ctl->h - (ctl_y*2);
libaroma_draw_rect(
c,
ctl->w-(hdl_r+libaroma_dp(3)),
ctl_y,
libaroma_dp(1),
ctl_h,
libaroma_alpha(me->color_bg,indicator_color,(80*vss)>>8),
0xff
);
if (me->handle_touched){
handle_opa=0xff;
indicator_color=libaroma_colorget(ctl,NULL)->primary;
}
else{
handle_opa=220;
}
}
else{
ctl_y = libaroma_dp(2);
hdl_w = ctl_y*2;
hdl_r = libaroma_dp(5);
ctl_h = ctl->h - hdl_w;
handle_opa = 120;
}
int hdl_ch= (ctl->h * ctl_h)/me->client_h;
int hdl_h = MAX(hdl_ch,libaroma_dp(36));
hdl_ch = hdl_h-hdl_ch;
int hdl_y = ((scroll_y * (ctl_h-hdl_ch))/me->client_h)+ctl_y;
libaroma_draw_rect(
c,
ctl->w-(hdl_r+hdl_w),
hdl_y,
hdl_w,
hdl_h,
libaroma_alpha(me->color_bg,indicator_color,
(handle_opa*vss)>>8),
0xff
);
}
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
/* vertical border */
if (me->flags&LIBAROMA_CTL_SCROLL_WITH_VBORDER){
if (me->max_scroll_y>me->minscroll_y){
word divcolor = libaroma_color_isdark(me->color_bg)?RGB(cccccc):RGB(666666);
divcolor=libaroma_alpha(me->color_bg,divcolor,50);
if (scroll_y>me->minscroll_y){
libaroma_draw_rect(
c,
0,
0,
c->w,
libaroma_dp(1),
divcolor,
0xff
);
}
if (scroll_y<me->max_scroll_y){
libaroma_draw_rect(
c,
0,
c->h-libaroma_dp(1),
c->w,
libaroma_dp(1),
divcolor,
0xff
);
}
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
/* shadow */
if (me->flags&LIBAROMA_CTL_SCROLL_WITH_SHADOW){
libaroma_gradient_ex1(c, 0, 0, ctl->w,
libaroma_dp(5),0,0,0,0,80,0,2);
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
}
#endif
/* overshoot draw */
if ((me->max_scroll_y>me->minscroll_y)&&(me->ovs_state>0)&&(me->ovs_state<1)){
int max_ovsz = MIN(c->h/4,libaroma_dp(100));
int overshoot_sz = MIN(abs(me->ovs_y)/3,max_ovsz);
if (overshoot_sz>0){
float opa = 0;
if (me->ovs_state<0.25){
opa = libaroma_cubic_bezier_easein(me->ovs_state*4);
}
else{
opa = 1;
}
if (me->ovs_ustate>0){
opa*=1-libaroma_cubic_bezier_swiftout(me->ovs_ustate);
}
opa = MAX(0,MIN(1,opa));
if (me->ovs_ustate>0){
overshoot_sz = overshoot_sz * opa;
}
else{
overshoot_sz = overshoot_sz * MIN(1,opa*2);
}
float opacity=((float) overshoot_sz) / max_ovsz;
overshoot_sz = MIN(MIN(overshoot_sz,c->h/5),libaroma_dp(80));
if (overshoot_sz>1){
LIBAROMA_CANVASP ovshot = libaroma_canvas_ex(
c->w, overshoot_sz, 1);
int vw = c->w>>2;
if (me->ovs_x<0){
me->ovs_x=0;
}
else if (me->ovs_x>ctl->w){
me->ovs_x=ctl->w;
}
int vx = me->ovs_x>>2;
int ovw= overshoot_sz>>1;
int x1 = 0-(vw-vx);
int x2 = x1+c->w+vw;
if (me->ovs_custom_cb!=NULL){//use ovs effect callback
me->ovs_custom_cb(ctl, ovshot, (me->ovs_y<0));
}
else { //default ovs effect
//fill with primary color (without this it will be black)
libaroma_canvas_setcolor(ovshot,
libaroma_colorget(ctl,NULL)->primary,0);
if (me->ovs_y<0){
LIBAROMA_PATHP path=libaroma_path(x1,0);
libaroma_path_curve(
path, overshoot_sz,
x1+ovw, overshoot_sz,
x2-ovw, overshoot_sz,
x2, 0
);
libaroma_path_draw(ovshot, path, 0, 0x60*opacity, 1, 0.33);
libaroma_path_free(path);
}
else{
LIBAROMA_PATHP path=libaroma_path(x1,overshoot_sz-1);
libaroma_path_curve(
path, overshoot_sz,
x1+ovw, 0,
x2-ovw, 0,
x2,overshoot_sz-1
);
libaroma_path_draw(ovshot, path, 0, 0x60*opacity, 1, 0.33);
libaroma_path_free(path);
}
}
/* draw overshoot effect canvas on top of control */
libaroma_draw(c,ovshot,0,(me->ovs_y<0)?0:c->h-overshoot_sz,1);
libaroma_canvas_free(ovshot);
}
}
}
}
else{
if ((me->minscroll_cb)&&(me->minscroll_y)){
LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y);
if (mscv){
libaroma_canvas_setcolor(mscv,me->color_bg,0xff);
me->minscroll_cb(ctl, mscv, me->scroll_y);
libaroma_canvas_free(mscv);
}
}
libaroma_canvas_setcolor(c,me->color_bg,0xff);
}
}
else{
if ((me->minscroll_cb)&&(me->minscroll_y)){
LIBAROMA_CANVASP mscv=libaroma_canvas(c->w,me->minscroll_y);
if (mscv){
libaroma_canvas_setcolor(mscv,me->color_bg,0xff);
me->minscroll_cb(ctl, mscv, me->scroll_y);
libaroma_canvas_free(mscv);
}
}
libaroma_canvas_setcolor(c,me->color_bg,0xff);
}
} /* End of _libaroma_ctl_scroll_draw */
/*
* Function : _libaroma_ctl_scroll_touch_handler
* Return Value: dword
* Descriptions: touch message handler
*/
/* Touch message handler for the scroll control.
 *
 * ctl   : the scroll control (validated by _LIBAROMA_CTL_CHECK, which
 *         also brings `me` — the control's private data — into scope).
 * msg   : originating message (msg->sent is used as a timestamp).
 * x, y  : touch position already translated into control coordinates.
 * state : LIBAROMA_HID_EV_STATE_DOWN / _MOVE / _UP.
 *
 * Returns 0 always.
 *
 * State machine (me->allow_scroll):
 *   0 = not scrolling, 1 = direct scroll, 2 = touch tentatively owned by
 *   the client until the finger moves far enough to become a scroll.
 */
dword _libaroma_ctl_scroll_touch_handler(
  LIBAROMA_CONTROLP ctl, LIBAROMA_MSGP msg,int x, int y, byte state){
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  switch(state){
    case LIBAROMA_HID_EV_STATE_DOWN:{
      ALOGT("scroll_message - touch down: %i, %i",x, y);
      /* a fast fling (or an active bounce) in progress means this touch
       * is a "stop the fling" tap, not a client interaction */
      byte is_have_velocity=(
        (abs(me->velocity)>
          libaroma_dp(2)*255
        )||
        me->bounce_velocity)?1:0;
      byte is_direct_handle = 0;
      if (me->flags&LIBAROMA_CTL_SCROLL_WITH_HANDLE){
        /* touch near the right edge grabs the scrollbar handle */
        me->handle_touched=
          (x>ctl->w-libaroma_dp(_LIBAROMA_CTL_SCROLL_HANDLE_DP))?1:0;
        /* direct handle drag only while the indicator is visible and
         * there is actually something to scroll */
        is_direct_handle =
          ((me->handle_touched)&&(me->scroll_state)&&(me->max_scroll_y>0))?1:0;
      }
      else{
        me->handle_touched=0;
      }
      /* set fling value */
      me->bounce_velocity=0;
      me->velocity=0;
      me->allow_scroll=1;
      me->touched=1;
      /* check client message */
      libaroma_mutex_lock(me->mutex);
      me->client_touch_start=0;
      me->client_touched=0;
      /* only offer the touch to the client when the list was at rest and
       * the handle is not being dragged */
      if ((!is_have_velocity)&&(!is_direct_handle)&&
          (me->client.handler->message)){
        int client_x = x;
        int client_y = y + me->scroll_y; /* translate into client space */
        if (_libaroma_ctl_scroll_client_msg(
              ctl,LIBAROMA_CTL_SCROLL_MSG_ISNEED_TOUCH,
              client_x, client_y
            )==LIBAROMA_CTL_SCROLL_MSG_HANDLED){
          /* client wants this touch: defer the decision until move */
          me->client_touch_start=msg->sent; /*libaroma_tick();*/
          me->allow_scroll=2;
        }
      }
      libaroma_mutex_unlock(me->mutex);
      if (is_direct_handle){
        /* jump the scroll position to where the handle was grabbed */
        me->allow_scroll=1;
        int ctl_h = ctl->h-libaroma_dp(36);
        int sarea = ctl_h - ((ctl->h * ctl_h) / me->client_h);
        int scr_y = y-(ctl->h/2)+(sarea/2);
        int req_y = (scr_y * me->max_scroll_y) / sarea;
        libaroma_ctl_scroll_request_pos(ctl,req_y);
      }
      else if (me->flags&LIBAROMA_CTL_SCROLL_WITH_HANDLE){
        /* cancel any pending smooth-scroll request */
        me->request_scroll_y=-1;
      }
      libaroma_fling_down(&me->fling, y);
      /* save touch value */
      me->touch_x=x;
      me->touch_y=y;
      me->touch_scroll_y = me->scroll_y;
      me->ovs_x=x;
    }
    break;
    case LIBAROMA_HID_EV_STATE_MOVE:
    case LIBAROMA_HID_EV_STATE_UP:{
      ALOGT("scroll_message - touch move: %i, %i",x, y);
      me->ovs_x=x;
      me->bounce_velocity=0;
      byte is_first_allowed = 0;
      if (me->allow_scroll==2){
        /* touch currently owned by the client: decide whether the finger
         * moved far enough to steal it back as a scroll gesture */
        libaroma_mutex_lock(me->mutex);
        int move_sz = me->touch_y - y;
        int client_message_param = LIBAROMA_CTL_SCROLL_MSG_TOUCH_MOVE;
        int scrdp=libaroma_dp(
          me->client_touched?
          _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP:
          _LIBAROMA_CTL_SCROLL_MIN_ALOWSCROLL_DP_NOITEM
        );
        if (abs(move_sz)>=scrdp){
          /* threshold crossed: take over the gesture and cancel the
           * client's pending touch */
          is_first_allowed = 1;
          me->allow_scroll=1;
          me->client_touch_start=0;
          client_message_param = LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL;
        }
        /* send client message */
        if ((me->client_touched)&&(me->client.handler->message)){
          int client_x = x;
          int client_y = y + me->scroll_y;
          if (_libaroma_ctl_scroll_client_msg(
                ctl,client_message_param,
                client_x, client_y
              )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){
            me->synced_y=-1; /* force redraw */
          }
          if (client_message_param==LIBAROMA_CTL_SCROLL_MSG_TOUCH_CANCEL){
            me->client_touched=0;
          }
        }
        libaroma_mutex_unlock(me->mutex);
      }
      /* scrolling move handler */
      if ((me->allow_scroll==1)&&(me->touch_y!=y)){
        int move_sz = me->touch_y - y;
        if (!me->handle_touched){
          /* track overscroll when dragging past either end */
          if (me->scroll_y+move_sz<me->minscroll_y){
            if (!me->ovs_start){
              me->ovs_start=msg->sent; /*libaroma_tick();*/
              me->ovs_bounce=0;
              me->ovs_state=0;
              me->ovs_ustate=0;
              me->ovs_ustart=0;
              me->ovs_y=0;
            }
            me->ovs_y+=move_sz;
          }
          else if (me->scroll_y+move_sz>me->max_scroll_y){
            if (!me->ovs_start){
              me->ovs_start=msg->sent; /*libaroma_tick();*/
              me->ovs_bounce=0;
              me->ovs_state=0;
              me->ovs_ustate=0;
              me->ovs_ustart=0;
              me->ovs_y=0;
            }
            me->ovs_y+=move_sz;
          }
          else if (!me->ovs_ustart){
            /* back inside range: start releasing the overshoot effect */
            me->ovs_ustate=0;
            me->ovs_ustart=1;
            me->ovs_bounce=3;
          }
          /* normal scroll */
          if (is_first_allowed){
            /* first move after stealing the touch: scroll smoothly */
            libaroma_ctl_scroll_request_pos(ctl, me->touch_scroll_y+move_sz);
          }
          else{
            me->request_scroll_y=-1;
            libaroma_ctl_scroll_set_pos(ctl, me->touch_scroll_y+move_sz);
          }
          me->touch_scroll_y = me->scroll_y;
          /* set history */
          libaroma_fling_move(&me->fling, y);
        }
        else if (me->max_scroll_y>0){
          /* dragging the scrollbar handle: map y to a scroll position */
          int ctl_h = ctl->h-libaroma_dp(36);
          int sarea = ctl_h - ((ctl->h * ctl_h) / me->client_h);
          int scr_y = y-(ctl->h/2)+(sarea/2);
          int req_y = (scr_y * me->max_scroll_y) / sarea;
          libaroma_ctl_scroll_request_pos(ctl,req_y);
        }
        me->touch_y=y;
      }
      if (state==LIBAROMA_HID_EV_STATE_UP){
        ALOGT("scroll_message - touch up: %i, %i",x, y);
        me->bounce_velocity=0;
        if (!me->handle_touched){
          if (me->allow_scroll){
            /* convert the touch history into a fling velocity */
            me->velocity=(libaroma_fling_up(&me->fling, y)*
              libaroma_px(18))/libaroma_dp(4);
            if (me->velocity){
              me->touched=0;
            }
          }
        }
        else if (me->allow_scroll==1){
          /* handle released: restart the indicator fade animation */
          if (!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR)){
            me->scroll_tick = msg->sent; /*libaroma_tick();*/
            me->scroll_state=256;
            me->synced_y=-1;
          }
        }
        /* clear item touch if initialized */
        libaroma_mutex_lock(me->mutex);
        if ((me->client_touch_start||me->client_touched)&&
            (me->client.handler->message)){
          int client_x = x;
          int client_y = y + me->scroll_y;
          if (me->client_touch_start){
            /* the deferred DOWN was never delivered; send it now so the
             * client sees a consistent DOWN/UP pair */
            if (_libaroma_ctl_scroll_client_msg(
                  ctl,LIBAROMA_CTL_SCROLL_MSG_TOUCH_DOWN,
                  client_x, client_y
                )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){
              me->synced_y=-1;
            }
          }
          if (_libaroma_ctl_scroll_client_msg(
                ctl,LIBAROMA_CTL_SCROLL_MSG_TOUCH_UP,
                client_x, client_y
              )==LIBAROMA_CTL_SCROLL_MSG_NEED_DRAW){
            me->synced_y=-1;
          }
        }
        me->client_touch_start=0;
        me->client_touched=0;
        libaroma_mutex_unlock(me->mutex);
        /* reset */
        me->handle_touched=0;
        me->allow_scroll=0;
        me->touched=0;
        me->touch_x=0;
        me->touch_y=0;
        me->ovs_x=x;
        if (!me->ovs_ustart){
          me->ovs_ustate=0;
          me->ovs_ustart=1;
          me->ovs_bounce=3;
        }
      }
    }
    break;
  }
  return 0;
} /* End of _libaroma_ctl_scroll_touch_handler */
/*
* Function : libaroma_ctl_scroll_isactive
* Return Value: byte
* Descriptions: check if control is active
*/
byte libaroma_ctl_scroll_isactive(LIBAROMA_CONTROLP ctl){
  /* Report whether the scroll control's worker threads are running.
   * Returns 0 when ctl is not a scroll control. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  byte running = me->active;
  return running;
} /* End of libaroma_ctl_scroll_isactive */
/*
* Function : _libaroma_ctl_scroll_msg
* Return Value: byte
* Descriptions: message callback
*/
/* Control message callback: routes touch events and manages the worker
 * threads tied to window activation.
 *
 * Returns the touch handler's result for LIBAROMA_MSG_TOUCH, 0 otherwise.
 */
dword _libaroma_ctl_scroll_msg(
  LIBAROMA_CONTROLP ctl,
  LIBAROMA_MSGP msg){
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  switch(msg->msg){
    case LIBAROMA_MSG_TOUCH:
      {
        /* touch handler */
        int x = msg->x;
        int y = msg->y;
        /* translate window coordinates into control coordinates */
        libaroma_window_calculate_pos(NULL,ctl,&x,&y);
        return _libaroma_ctl_scroll_touch_handler(
          ctl,msg,x,y,msg->state
        );
      }
      break;
    case LIBAROMA_MSG_WIN_ACTIVE:
      {
        /* start updater thread*/
        me->active=1;
        me->client_touch_start=0;
        me->client_touched=0;
        me->synced_y = -1; /* force a redraw on next frame */
        /* start cache thread */
#ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD
        libaroma_thread_create(
          &me->cache_thread,
          _libaroma_ctl_scroll_cache_thread,
          (voidp) ctl);
#endif
        libaroma_thread_create(
          &me->calc_thread,
          _libaroma_ctl_scroll_calc_thread,
          (voidp) ctl);
      }
      break;
    case LIBAROMA_MSG_WIN_INACTIVE:
      {
        /* stop updater thread */
        me->active=0;
        libaroma_sleep(30);
        /* wake the cache thread so it can observe active==0 and exit */
        libaroma_cond_lock(&me->cmutex);
        libaroma_cond_signal(&me->ccond);
        libaroma_cond_unlock(&me->cmutex);
        libaroma_mutex_lock(me->mutex);
        libaroma_thread_join(me->calc_thread);
#ifdef LIBAROMA_CTL_SCROLL_WITH_CACHE_THREAD
        libaroma_thread_join(me->cache_thread);
        me->cache_thread=0;
#endif
        me->calc_thread=0;
        me->client_touch_start=0;
        me->client_touched=0;
        me->synced_y = -1;
        libaroma_mutex_unlock(me->mutex);
      }
      break;
  }
  return 0;
} /* End of _libaroma_ctl_scroll_msg */
/*
* Function : _libaroma_ctl_scroll_destroy
* Return Value: void
* Descriptions: destroy callback
*/
/* Destroy callback: tears down the client, the client canvas and all
 * synchronization primitives, then frees the private data.
 *
 * Fix: guard against me->client.handler being NULL. A scroll control that
 * never had a client attached via libaroma_ctl_scroll_set_client() would
 * previously dereference a NULL handler here (set_client itself checks
 * `me->client.handler` before touching ->destroy; destroy did not).
 */
void _libaroma_ctl_scroll_destroy(
  LIBAROMA_CONTROLP ctl){
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 
  );
  /* destroy client (if one was ever attached) */
  if ((me->client.handler!=NULL)&&(me->client.handler->destroy!=NULL)){
    me->client.handler->destroy(ctl,&me->client);
  }
  if (me->client_canvas!=NULL){
    libaroma_canvas_free(me->client_canvas);
    me->client_canvas=NULL;
  }
  /* release sync primitives in reverse order of creation */
  libaroma_cond_free(&me->ccond, &me->cmutex);
  libaroma_mutex_free(me->blitmutex);
  libaroma_mutex_free(me->fmutex);
  libaroma_mutex_free(me->mutex);
  free(me);
} /* End of _libaroma_ctl_scroll_destroy */
/*
* Function : libaroma_ctl_scroll
* Return Value: LIBAROMA_CONTROLP
* Descriptions: create scroll control
*/
/* Create a scroll control.
 *
 * win      : parent window.
 * id       : control id.
 * x,y,w,h  : geometry in dp.
 * bg_color : background color of the scroll area.
 * flags    : LIBAROMA_CTL_SCROLL_* behavior flags.
 *
 * Returns the new control, or NULL on allocation failure.
 *
 * Fixes:
 *  - calloc argument order (count, size) per the standard convention;
 *    behavior is identical, intent is clearer.
 *  - on libaroma_control_new() failure the mutexes and condition variable
 *    initialized above were leaked; they are now released before free().
 */
LIBAROMA_CONTROLP libaroma_ctl_scroll(
  LIBAROMA_WINDOWP win, word id,
  int x, int y, int w, int h,
  word bg_color, byte flags
){
  /* init internal data */
  _LIBAROMA_CTL_SCROLLP me = (_LIBAROMA_CTL_SCROLLP)
    calloc(1,sizeof(_LIBAROMA_CTL_SCROLL));
  if (!me){
    ALOGW("libaroma_ctl_scroll alloc scroll memory failed");
    return NULL;
  }
  libaroma_mutex_init(me->blitmutex); /* blit drawing mutex */
  libaroma_mutex_init(me->fmutex);    /* cache drawing mutex */
  libaroma_mutex_init(me->mutex);     /* control drawing mutex */
  libaroma_cond_init(&me->ccond, &me->cmutex);
  /* set internal data */
  me->flags = flags;
  me->color_bg = bg_color;
  me->request_new_height=-1; /* -1 means "no pending height request" */
  me->request_scroll_y=-1;   /* -1 means "no pending scroll request" */
  me->synced_y = -1;         /* force initial draw */
  /* init control */
  LIBAROMA_CONTROLP ctl =
    libaroma_control_new(
      id,
      x, y, w, h,
      libaroma_dp(32),libaroma_dp(32), /* min size */
      me,
      &_libaroma_ctl_scroll_handler,
      win
    );
  if (!ctl){
    /* control creation failed: release everything initialized above
     * (previously only `me` was freed, leaking mutexes/cond) */
    libaroma_cond_free(&me->ccond, &me->cmutex);
    libaroma_mutex_free(me->blitmutex);
    libaroma_mutex_free(me->fmutex);
    libaroma_mutex_free(me->mutex);
    free(me);
  }
  return ctl;
} /* End of libaroma_ctl_scroll */
/*
* Function : libaroma_ctl_scroll_request_height
* Return Value: byte
* Descriptions: request height
*/
byte libaroma_ctl_scroll_request_height(LIBAROMA_CONTROLP ctl, int h){
  /* Queue a deferred client-height change; the draw path picks it up and
   * applies it via libaroma_ctl_scroll_set_height(). Returns 1, or 0 when
   * ctl is not a scroll control.
   * NOTE(review): this field is written under fmutex here but read under
   * the control mutex in the draw handler — confirm intended locking. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  libaroma_mutex_lock(me->fmutex);
  me->request_new_height = h;
  libaroma_mutex_unlock(me->fmutex);
  return 1;
} /* End of libaroma_ctl_scroll_request_height */
/*
* Function : libaroma_ctl_scroll_get_scroll
* Return Value: int
* Descriptions: get scroll position
*/
int libaroma_ctl_scroll_get_scroll(LIBAROMA_CONTROLP ctl, int * scroll_h){
  /* Return the current scroll position; optionally report the maximum
   * scrollable offset through scroll_h (may be NULL). */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  int position = me->scroll_y;
  if (scroll_h!=NULL){
    *scroll_h = me->max_scroll_y;
  }
  return position;
} /* End of libaroma_ctl_scroll_get_scroll */
/*
* Function : libaroma_ctl_scroll_get_height
* Return Value: int
* Descriptions: get scroll height
*/
int libaroma_ctl_scroll_get_height(LIBAROMA_CONTROLP ctl){
  /* Return the effective client height: a pending height request wins
   * over the currently applied height. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  libaroma_mutex_lock(me->fmutex);
  int pending = me->request_new_height;
  libaroma_mutex_unlock(me->fmutex);
  return (pending!=-1) ? pending : me->client_h;
} /* End of libaroma_ctl_scroll_get_height */
/*
* Function : libaroma_ctl_scroll_set_height
* Return Value: byte
* Descriptions: set scroll height
*/
/* Apply a new client height, resizing/reallocating the backing client
 * canvas as needed.
 *
 * Returns 0 when the height is unchanged, 1 otherwise.
 *
 * Locking: blitmutex + fmutex are held for the whole resize; me->mutex is
 * taken only around the canvas memory operations the draw path reads.
 */
byte libaroma_ctl_scroll_set_height(LIBAROMA_CONTROLP ctl, int h){
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  if (me->client_h==h){
    return 0;
  }
  libaroma_mutex_lock(me->blitmutex);
  libaroma_mutex_lock(me->fmutex);
  /* recompute scroll range; clamp to the minimum scroll offset */
  me->max_scroll_y = h-ctl->h;
  if (me->max_scroll_y<me->minscroll_y){
    me->max_scroll_y=me->minscroll_y;
  }
  if (h<1){
    /* empty client: drop the canvas entirely */
    if (me->client_canvas!=NULL){
      me->client_h = h;
      libaroma_mutex_lock(me->mutex);
      libaroma_canvas_free(me->client_canvas);
      me->client_canvas=NULL;
      libaroma_mutex_unlock(me->mutex);
    }
  }
  else{
    /* max 3x control height */
    int valid_height = h;
#ifdef LIBAROMA_CTL_SCROLL_WITH_MAX_CACHE
    if (valid_height>_LIBAROMA_CTL_SCROLL_MAX_CACHE){
      valid_height=_LIBAROMA_CTL_SCROLL_MAX_CACHE;
    }
#endif
    LIBAROMA_CANVASP c=me->client_canvas;
    if (me->client_canvas){
      if (valid_height!=c->h){
        /* ns = new pixel count (c->l is the canvas line stride);
         * the buffer holds 2 bytes per pixel, hence ns*2 below */
        int ns = c->l * valid_height;
        if (ns>c->s){
          /* growing: reallocate first, then publish the new size
           * NOTE(review): realloc result is not checked; on failure
           * c->data becomes NULL and the old buffer leaks - confirm
           * whether OOM is considered fatal here */
          libaroma_mutex_lock(me->mutex);
          c->data=realloc(c->data,ns*2);
          libaroma_mutex_unlock(me->mutex);
          c->s=ns;
          c->h=valid_height;
          me->client_h = h;
        }
        else{
          /* shrinking: publish the smaller size first, then trim */
          me->client_h = h;
          c->s=ns;
          c->h=valid_height;
          libaroma_mutex_lock(me->mutex);
          c->data=realloc(c->data,ns*2);
          libaroma_mutex_unlock(me->mutex);
        }
      }
      else{
        /* cache height unchanged (capped); just record the new height */
        me->client_h = h;
      }
    }
    else{
      /* first use: create the client canvas */
      libaroma_mutex_lock(me->mutex);
      c = libaroma_canvas(ctl->w,valid_height);
      libaroma_canvas_setcolor(c,me->color_bg,0xff);
      me->client_canvas = c;
      libaroma_mutex_unlock(me->mutex);
      me->client_h = h;
    }
  }
  me->synced_y=-1; /* force redraw */
  libaroma_mutex_unlock(me->fmutex);
  libaroma_mutex_unlock(me->blitmutex);
  /* re-clamp the scroll position against the new range */
  libaroma_ctl_scroll_set_pos(ctl,me->scroll_y);
  me->cache_state = 10; /* force recalculate */
  return 1;
} /* End of libaroma_ctl_scroll_set_height */
/*
* Function : libaroma_ctl_scroll_getflags
* Return Value: byte
* Descriptions: get scroll flags
*/
byte libaroma_ctl_scroll_getflags(
  LIBAROMA_CONTROLP ctl
){
  /* Return the control's LIBAROMA_CTL_SCROLL_* flags (0 if ctl is not a
   * scroll control). */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  byte current_flags = me->flags;
  return current_flags;
}
/*
* Function : libaroma_ctl_scroll_setflags
* Return Value: byte
* Descriptions: set scroll flags
*/
byte libaroma_ctl_scroll_setflags(
  LIBAROMA_CONTROLP ctl,
  byte flags){
  /* Replace the control's LIBAROMA_CTL_SCROLL_* flags wholesale.
   * Returns 1, or 0 when ctl is not a scroll control. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  me->flags = flags;
  return 1;
}
/*
* Function : libaroma_ctl_scroll_set_pos
* Return Value: byte
* Descriptions: set scroll position - directly
*/
byte libaroma_ctl_scroll_set_pos(LIBAROMA_CONTROLP ctl, int scroll_y){
  /* Set the scroll position immediately (no animation), clamped to
   * [minscroll_y, max_scroll_y]. Returns 1, or 0 on a bad control. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  /* clamp high first, then low, matching the original precedence */
  int target = scroll_y;
  if (target>me->max_scroll_y){
    target = me->max_scroll_y;
  }
  if (target<me->minscroll_y){
    target = me->minscroll_y;
  }
  if (me->scroll_y!=target){
    /* record scroll direction: 1 = up, 2 = down */
    me->move_state = (target<me->scroll_y) ? 1 : 2;
    me->scroll_y = target;
    if (!me->cache_state){
      me->cache_state = 1;
    }
    if (!(me->flags&LIBAROMA_CTL_SCROLL_NO_INDICATOR)){
      /* restart the indicator fade timer */
      me->scroll_tick = libaroma_tick();
    }
  }
  return 1;
} /* End of libaroma_ctl_scroll_set_pos */
/*
* Function : libaroma_ctl_scroll_request_pos
* Return Value: byte
* Descriptions: request to change scroll position - nicely
*/
byte libaroma_ctl_scroll_request_pos(LIBAROMA_CONTROLP ctl, int req_y){
  /* Request an animated ("nice") scroll to req_y; the calc thread moves
   * toward this target. The request is clamped to the valid range. */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  int clamped = req_y;
  if (clamped>me->max_scroll_y){
    clamped = me->max_scroll_y;
  }
  else if (clamped<me->minscroll_y){
    clamped = me->minscroll_y;
  }
  me->request_scroll_y = clamped;
  return 1;
} /* End of libaroma_ctl_scroll_request_pos */
/*
* Function : libaroma_ctl_scroll_get_bg_color
* Return Value: byte
* Descriptions: request to change scroll position - nicely
*/
word libaroma_ctl_scroll_get_bg_color(LIBAROMA_CONTROLP ctl){
  /* Return the scroll area's background color (0 on a bad control). */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  word background = me->color_bg;
  return background;
} /* End of libaroma_ctl_scroll_get_bg_color */
/*
* Function : libaroma_ctl_scroll_set_client
* Return Value: byte
* Descriptions: set client handler
*/
byte libaroma_ctl_scroll_set_client(
  LIBAROMA_CONTROLP ctl,
  voidp internal,
  LIBAROMA_CTL_SCROLL_CLIENT_HANDLERP handler
){
  /* Attach a client handler (the content provider) to this scroll
   * control, destroying any previously attached client first.
   * Returns 0 when handler is NULL or ctl is not a scroll control. */
  if (handler==NULL){
    return 0;
  }
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  /* tear down the old client, if any */
  LIBAROMA_CTL_SCROLL_CLIENT_HANDLERP previous = me->client.handler;
  if ((previous!=NULL)&&(previous->destroy!=NULL)){
    previous->destroy(ctl,&me->client);
  }
  me->client.handler = handler;
  me->client.internal = internal;
  me->synced_y = -1;        /* force redraw */
  me->cache_state = 10;     /* force recalculate */
  return 1;
} /* End of libaroma_ctl_scroll_set_client */
/*
* Function : libaroma_ctl_scroll_get_client
* Return Value: LIBAROMA_CTL_SCROLL_CLIENTP
* Descriptions: get scroll client data
*/
LIBAROMA_CTL_SCROLL_CLIENTP libaroma_ctl_scroll_get_client(
  LIBAROMA_CONTROLP ctl){
  /* Return the attached client data, or NULL when no handler is set
   * (or ctl is not a scroll control). */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, NULL
  );
  if (me->client.handler){
    return &me->client;
  }
  return NULL;
} /* End of libaroma_ctl_scroll_get_client */
/*
* Function : libaroma_ctl_scroll_is_visible
* Return Value: byte
* Descriptions: is this area visible?
*/
byte libaroma_ctl_scroll_is_visible(
  LIBAROMA_CONTROLP ctl, int y, int h
){
  /* Check whether the client-space span [y, y+h) intersects the region
   * currently cached in the client canvas. Returns 1 on overlap. */
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  if ((me->client_canvas==NULL)||(!me->active)){
    return 0;
  }
  int cache_top = me->draw_y;
  int cache_bottom = cache_top + me->client_canvas->h;
  /* standard half-open interval overlap test */
  return ((y+h>cache_top)&&(y<cache_bottom)) ? 1 : 0;
} /* End of libaroma_ctl_scroll_is_visible */
/*
* Function : libaroma_ctl_scroll_blit
* Return Value: byte
* Descriptions: blit canvas into client canvas
*/
/* Blit a canvas into the client cache canvas at client-space position
 * (x, y), optionally erasing the destination with the background first.
 *
 * The client canvas is used as a ring buffer: (y - draw_y + cache_y) is
 * taken modulo the canvas height, and a blit that crosses the bottom edge
 * is split into a second blit that wraps to the top.
 *
 * Returns 1 when something was drawn, 0 when the target span is outside
 * the cached region or the control is inactive.
 */
byte libaroma_ctl_scroll_blit(
  LIBAROMA_CONTROLP ctl,
  LIBAROMA_CANVASP canvas,
  int x, int y, int w, int h,
  byte erase
){
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  if (me->client_canvas==NULL){
    return 0;
  }
  if (!me->active){
    return 0;
  }
  /* clamp the horizontal span to the canvas */
  if (x<0){
    x=0;
  }
  if ((w<1)||(x+w>me->client_canvas->w)){
    w=me->client_canvas->w-x;
  }
  int bottom = y+h;
  int draw_t = me->draw_y;
  int draw_b = draw_t+me->client_canvas->h;
  if ((bottom>draw_t)&&(y<draw_b)){
    /* dy = destination row inside the ring buffer */
    int dy = (y-draw_t+me->cache_y)%me->client_canvas->h;
    /* split_h = rows that wrap past the bottom edge */
    int split_h = (dy+h)-me->client_canvas->h;
    byte is_split=((dy+h>me->client_canvas->h)&&(me->cache_y)&&(split_h>0));
    libaroma_mutex_lock(me->blitmutex);
    if (erase){
      /* clear destination with the background color before blending */
      libaroma_draw_rect(
        me->client_canvas, x, dy, w, h, me->color_bg, 0xff
      );
      if (is_split){
        libaroma_draw_rect(
          me->client_canvas, x, 0, w, split_h, me->color_bg, 0xff
        );
      }
    }
    /* main (non-wrapped) part */
    libaroma_draw_ex(
      me->client_canvas,
      canvas,
      x, dy, 0, 0, w, h,
      1,
      0xff
    );
    if (is_split){
      /* wrapped part: last split_h source rows land at the canvas top */
      libaroma_draw_ex(
        me->client_canvas,
        canvas,
        x, 0, 0, h-split_h, w, split_h,
        1,
        0xff
      );
    }
    libaroma_mutex_unlock(me->blitmutex);
    return 1;
  }
  return 0;
} /* End of libaroma_ctl_scroll_blit */
/*
* Function : libaroma_ctl_scroll_set_min_scroll
* Return Value: byte
* Descriptions: set minimal scroll y
*/
byte libaroma_ctl_scroll_set_min_scroll(
  LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_SCROLL_MINSCROLL_HANDLER cb, int y
){
  /* Configure a minimum scroll offset of y pixels plus a callback that
   * draws the reserved band. Rejects negative offsets. */
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  if (y<0){
    return 0;
  }
  libaroma_mutex_lock(me->fmutex);
  me->minscroll_cb = cb;
  me->minscroll_y = y;
  me->synced_y = -1; /* force redraw */
  libaroma_mutex_unlock(me->fmutex);
  return 1;
}
/*
* Function : libaroma_ctl_scroll_set_ovs_callback
* Return Value: byte
* Descriptions: set overscroll effect callback
*/
byte libaroma_ctl_scroll_set_ovs_callback(
  LIBAROMA_CONTROLP ctl, LIBAROMA_CTL_SCROLL_OVERSHOOT_EFFECT cb
){
  /* Install a custom overscroll-effect renderer (NULL restores the
   * built-in curve effect). Returns 1, or 0 on a bad control. */
  /* internal check */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_scroll_handler, _LIBAROMA_CTL_SCROLLP, 0
  );
  libaroma_mutex_lock(me->fmutex);
  me->ovs_custom_cb = cb;
  libaroma_mutex_unlock(me->fmutex);
  return 1;
}
#endif /* __libaroma_ctl_scroll_c__ */
|
fc_kernel_int8_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Author: 1091545398@qq.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include "fc_kernel_int8_arm.h"
/* Compute 8 output channels of an int8 GEMV using NEON.
 *
 * biases      : optional 8 per-channel int32 biases (may be NULL).
 * scales      : 8 per-channel requantization scales.
 * inp         : input vector, kernel_size int8 elements.
 * kernel      : weights interleaved 8-wide (see interleave_kernel):
 *               element k of channel j lives at kernel[8*k + j].
 * kernel_size : length of the dot product.
 * output      : 8 int8 results, clamped to [-127, 127].
 */
void gemv_1x8_int8(int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
                   int8_t *output) {
    int8x8_t input;
    int8x16_t weight_0_1, weight_2_3, weight_4_5, weight_6_7;
    int16x8_t weight0_16, weight1_16, weight2_16, weight3_16;
    int16x8_t weight4_16, weight5_16, weight6_16, weight7_16;
    /* res/res1 accumulate channels 0-3 and 4-7 respectively */
    int32x4_t res = {0, 0, 0, 0};
    int32x4_t res1 = {0, 0, 0, 0};
    int8_t *input_ptr = inp;
    int8_t *weight_ptr = kernel;
    /* kernel_size rounded down to a multiple of 8 for the vector loop */
    int remainw = (kernel_size >> 3) << 3;
    for (int i = 0; i < remainw; i = i + 8) {
        /* 8 input elements x 8 channels = 64 interleaved weights */
        input = vld1_s8(input_ptr);
        weight_0_1 = vld1q_s8(weight_ptr);
        weight_2_3 = vld1q_s8(weight_ptr + 16);
        weight_4_5 = vld1q_s8(weight_ptr + 32);
        weight_6_7 = vld1q_s8(weight_ptr + 48);
        /* broadcast each input element and multiply by its 8 weights */
        weight0_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 0)), vget_low_s8(weight_0_1));
        weight1_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 1)), vget_high_s8(weight_0_1));
        weight2_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 2)), vget_low_s8(weight_2_3));
        weight3_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 3)), vget_high_s8(weight_2_3));
        weight4_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 4)), vget_low_s8(weight_4_5));
        weight5_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 5)), vget_high_s8(weight_4_5));
        weight6_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 6)), vget_low_s8(weight_6_7));
        weight7_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 7)), vget_high_s8(weight_6_7));
        /* widen-add the 16-bit products into the 32-bit accumulators */
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight0_16), vget_low_s16(weight1_16)));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight2_16), vget_low_s16(weight3_16)));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight4_16), vget_low_s16(weight5_16)));
        res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight6_16), vget_low_s16(weight7_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight0_16), vget_high_s16(weight1_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight2_16), vget_high_s16(weight3_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight4_16), vget_high_s16(weight5_16)));
        res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight6_16), vget_high_s16(weight7_16)));
        input_ptr += 8;
        weight_ptr += 64;
    }
    /* scalar tail: one input element x 8 channels per iteration */
    for (int i = remainw; i < kernel_size; ++i) {
        weight0_16 = vmull_s8(vdup_n_s8(input_ptr[0]), vld1_s8(weight_ptr));
        res = vaddq_s32(vmovl_s16(vget_low_s16(weight0_16)), res);
        res1 = vaddq_s32(vmovl_s16(vget_high_s16(weight0_16)), res1);
        input_ptr += 1;
        weight_ptr += 8;
    }
    if (biases) {
        int32x4_t bias = vld1q_s32(biases);
        int32x4_t bias1 = vld1q_s32(biases + 4);
        res = vaddq_s32(res,bias);
        res1 = vaddq_s32(res1,bias1);
    }
    /* requantize: scale, add 0.5, truncate.
     * NOTE(review): +0.5 then truncation mis-rounds negative sums
     * (e.g. -1.4 -> trunc(-0.9) = 0 instead of -1); the scalar path in
     * gemv_1x2_int8 uses round() - confirm whether round-to-nearest
     * (vcvtaq_s32_f32 on AArch64) was intended here. */
    float32x4_t res_f = vcvtq_f32_s32(res);
    float32x4_t res1_f = vcvtq_f32_s32(res1);
    float32x4_t scale = vld1q_f32(scales);
    float32x4_t scale_1 = vld1q_f32(scales + 4);
    res_f = vmulq_f32(res_f, scale);
    res1_f = vmulq_f32(res1_f, scale_1);
    res_f = vaddq_f32(res_f,vdupq_n_f32(0.5f));
    res1_f = vaddq_f32(res1_f,vdupq_n_f32(0.5f));
    res = vcvtq_s32_f32(res_f);
    res1 = vcvtq_s32_f32(res1_f);
    /* narrow to int8 and clamp to the symmetric range [-127, 127].
     * NOTE(review): vmovn_s32/vmovn_s16 truncate (wrap) rather than
     * saturate, so a scaled value outside int16 range wraps before the
     * clamp below - vqmovn_* may have been intended; confirm. */
    int16x4_t res_16 = vmovn_s32(res);
    int16x4_t res1_16 = vmovn_s32(res1);
    int8x8_t result = vmovn_s16(vcombine_s16(res_16, res1_16));
    int8x8_t _m127 = vdup_n_s8(127);
    int8x8_t _m_127 = vdup_n_s8(-127);
    result = vmax_s8(_m_127, result);
    result = vmin_s8(_m127, result);
    vst1_s8(output, result);
}
/* Compute 2 output channels of an int8 GEMV using NEON.
 *
 * biases      : optional 2 per-channel int32 biases (may be NULL).
 * scales      : 2 per-channel requantization scales.
 * inp         : input vector, kernel_size int8 elements.
 * kernel      : weights interleaved 2-wide: element k of channel j lives
 *               at kernel[2*k + j].
 * kernel_size : length of the dot product.
 * output      : 2 int8 results, clamped to [-127, 127].
 *
 * Fixes:
 *  - remainw was "(kernel_size << 3) >> 3" (i.e. kernel_size itself),
 *    which made the 8-wide vector loop read past the buffers whenever
 *    kernel_size was not a multiple of 8; it now rounds DOWN to a
 *    multiple of 8 like gemv_1x8_int8 does.
 *  - the second clamp tested data_i32_0 but assigned data_i32_1
 *    (copy-paste typo), leaving channel 1 unclamped on the negative side.
 */
void gemv_1x2_int8(const int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
                   int8_t *output) {
    int8_t *input_ptr = inp;
    int8_t *weight_ptr = kernel;
    /* elements handled by the vector loop: kernel_size rounded down to a
     * multiple of 8 (was effectively kernel_size, causing an overread) */
    int remainw = (kernel_size >> 3) << 3;
    int8x8x2_t weight;
    int8x8_t input;
    int16x8_t out_16_0, out_16_1;
    int32x4_t out_32_0, out_32_1;
    int32_t sum0 = 0, sum1 = 0;
    for (int i = 0; i < remainw; i = i + 8) {
        /* vld2 de-interleaves: val[0] = channel 0, val[1] = channel 1 */
        weight = vld2_s8(weight_ptr);
        input = vld1_s8(input_ptr);
        out_16_0 = vmull_s8(weight.val[0], input);
        out_16_1 = vmull_s8(weight.val[1], input);
        out_32_0 = vpaddlq_s16(out_16_0);
        out_32_1 = vpaddlq_s16(out_16_1);
        sum0 += vgetq_lane_s32(out_32_0, 0) + vgetq_lane_s32(out_32_0, 1) +
                vgetq_lane_s32(out_32_0, 2) + vgetq_lane_s32(out_32_0, 3);
        sum1 += vgetq_lane_s32(out_32_1, 0) + vgetq_lane_s32(out_32_1, 1) +
                vgetq_lane_s32(out_32_1, 2) + vgetq_lane_s32(out_32_1, 3);
        weight_ptr += 16;
        input_ptr += 8;
    }
    /* scalar tail for the remaining (kernel_size % 8) elements */
    for (int i = remainw; i < kernel_size; ++i) {
        sum0 += weight_ptr[0] * input_ptr[0];
        sum1 += weight_ptr[1] * input_ptr[0];
        input_ptr++;
        weight_ptr += 2;
    }
    if (biases) {
        sum0 += biases[0];
        sum1 += biases[1];
    }
    /* requantize with round-to-nearest, clamp to [-127, 127] */
    int data_i32_0 = round(sum0 * scales[0]);
    if (data_i32_0 > 127)
        data_i32_0 = 127;
    else if (data_i32_0 < -127)
        data_i32_0 = -127;
    int data_i32_1 = round(sum1 * scales[1]);
    if (data_i32_1 > 127)
        data_i32_1 = 127;
    else if (data_i32_1 < -127)   /* fix: was testing data_i32_0 */
        data_i32_1 = -127;
    output[0] = data_i32_0;
    output[1] = data_i32_1;
}
// start and end channel must be 8 aligned
// start and end channel must be 8 aligned
void gemv1x8(const int8_t *input, const int8_t *output, int8_t *weight_interleaved,
             const int32_t *biases, const float *scales,
             int kernel_size, int start_channel, int end_channel, int num_thread,
             int cpu_affinity) {
    /* Walk the output channels in groups of eight and dispatch each
     * group to the 1x8 NEON micro-kernel with its slice of the
     * interleaved weights, biases and scales. */
    // #pragma omp parallel for num_threads(num_thread)
    for (int oc = start_channel; oc < end_channel; oc += 8) {
        int8_t *group_weights = (int8_t *) (weight_interleaved + kernel_size * oc);
        int8_t *group_result = (int8_t *) (output + oc);
        int32_t *group_biases = biases ? (int32_t *) (biases + oc) : NULL;
        const float *group_scales = scales + oc;
        gemv_1x8_int8(group_biases, group_scales, (int8_t *) input,
                      group_weights, kernel_size, group_result);
    }
}
// start channel must be 2 aligned
// start channel must be 2 aligned
/* Handle the tail output channels in pairs via the 1x2 NEON kernel, plus
 * an optional final odd channel computed in scalar code.
 *
 * Fix: the odd-channel branch previously requantized with `cur_scales`
 * left over from the pair loop (i.e. scales[ch-2]); worse, when the pair
 * loop never executed (start_channel == end_channel - 1) cur_scales was
 * read uninitialized. It now points at this channel's own scale.
 */
void gemv1x2(const int8_t *input, int8_t *output, int8_t *weight_interleaved,
             const int32_t *biases, const float *scales,
             int kernel_size,int start_channel,int end_channel,int num_thread,int cpu_affinity)
{
    int32_t sum;
    int ch = 0;
    int8_t *cur_kernel;
    int32_t *cur_biases;
    int8_t *cur_result;
    const float* cur_scales;
    /* process channel pairs */
    for (ch = start_channel; ch < (end_channel & -2); ch += 2) {
        cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
        cur_result = (int8_t *) (output + ch);
        cur_biases = biases ? (int32_t *) (biases + ch) : NULL;
        cur_scales = scales + ch;
        gemv_1x2_int8(cur_biases, cur_scales, (int8_t*) input, cur_kernel, kernel_size, cur_result);
    }
    /* optional final odd channel, computed in plain C */
    if (end_channel & 0x1) {
        cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
        cur_result = (int8_t *) (output + ch);
        cur_scales = scales + ch; /* fix: was stale/uninitialized here */
        sum = biases ? *(biases + ch) : 0;
        for (int j = 0; j < kernel_size; j++)
            sum = sum + input[j] * cur_kernel[j];
        /* requantize with round-to-nearest, clamp to [-127, 127] */
        int data_i32_0 = round(sum * cur_scales[0]);
        if (data_i32_0 > 127)
            data_i32_0 = 127;
        else if (data_i32_0 < -127)
            data_i32_0 = -127;
        *cur_result = data_i32_0;
    }
}
/* Repack row-major weights (one channel per row of kernel_size bytes) so
 * the GEMV micro-kernels can load a whole channel group contiguously.
 * Layout: 8-channel groups first, then 2-channel pairs, then a lone tail:
 *   group of 8: dst[8*k + j] = src[(ch + j)*kernel_size + k]
 *   pair:       dst[2*k + j] = src[(ch + j)*kernel_size + k]
 *   tail:       straight copy of the final channel. */
static void interleave_kernel(const int8_t *kernel, int8_t *kernel_interleaved, int out_chan, int kernel_size) {
    int ch = 0;
    /* interleave full groups of 8 channels */
    for (; ch + 8 <= out_chan; ch += 8) {
        int8_t *dst = kernel_interleaved + (size_t) kernel_size * ch;
        for (int k = 0; k < kernel_size; k++) {
            for (int j = 0; j < 8; j++) {
                dst[8 * k + j] = kernel[(size_t) kernel_size * (ch + j) + k];
            }
        }
    }
    /* interleave remaining channels in pairs */
    for (; ch + 2 <= out_chan; ch += 2) {
        int8_t *dst = kernel_interleaved + (size_t) kernel_size * ch;
        const int8_t *src0 = kernel + (size_t) kernel_size * ch;
        const int8_t *src1 = src0 + kernel_size;
        for (int k = 0; k < kernel_size; k++) {
            dst[2 * k] = src0[k];
            dst[2 * k + 1] = src1[k];
        }
    }
    /* odd tail: copy the last channel verbatim */
    if (ch < out_chan) {
        int8_t *dst = kernel_interleaved + (size_t) kernel_size * ch;
        const int8_t *src = kernel + (size_t) kernel_size * ch;
        for (int k = 0; k < kernel_size; k++) {
            dst[k] = src[k];
        }
    }
}
/* Pre-run hook for the int8 FC kernel: allocates the interleave/input
 * scratch buffers once and repacks the weights.
 *
 * Always returns 0.
 */
int int8_fc_kernel_prerun(struct ir_tensor *input_tensor, \
                          struct ir_tensor *filter_tensor, \
                          struct ir_tensor *output_tensor, \
                          struct fc_priv_info *priv_info, \
                          struct fc_param *param) {
    int num_output = param->num_output;
    int kernel_size = filter_tensor->dims[1];
    /* round up to an even length so the 1x2 interleave never overruns */
    int kernel_align = ((kernel_size + 1) & -2);
    if (!priv_info->interleave_buffer) {
        int mem_size = num_output * kernel_align;
        /* NOTE(review): sys_malloc result is not checked before use in
         * interleave_kernel below - confirm OOM policy */
        void *mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }
    if (!priv_info->input_buffer) {
        int mem_size = kernel_align;
        void *mem = sys_malloc(mem_size);
        priv_info->input_buffer = mem;
        priv_info->input_buffer_size = mem_size;
    }
    int8_t *filter_data = (int8_t *) filter_tensor->data;
    /* repack the weights into the channel-interleaved layout */
    interleave_kernel(filter_data, (int8_t *) priv_info->interleave_buffer, num_output,
                      kernel_size);
    return 0;
}
/*
 * Execute the int8 fully-connected layer for every row of the input batch.
 * For each batch row it runs the 8-wide GEMV over the aligned part of the
 * output channels and the 2-wide GEMV over the remainder.
 *
 * Per-channel requantization scales are computed as
 * input_scale * weight_scale[ch] / output_scale.
 *
 * Returns 0 on success, -1 on allocation failure.
 * Fixes vs. original: requant_scales was malloc'd on every call and never
 * freed (leak), and the allocation result was unchecked.
 */
int int8_fc_kernel_run(struct ir_tensor *input_tensor,
                       struct ir_tensor *filter_tensor,
                       struct ir_tensor *bias_tensor,
                       struct ir_tensor *output_tensor,
                       struct fc_priv_info *priv_info,
                       struct fc_param *param,
                       int num_thread, int cpu_affinity) {
    int out_num = param->num_output;
    int kernel_size = filter_tensor->dims[1];
    int8_t *input = (int8_t *) input_tensor->data;
    int8_t *output = (int8_t *) output_tensor->data;
    int8_t *weight = (int8_t *) priv_info->interleave_buffer;
    int32_t *biases = NULL;
    if (bias_tensor)
        biases = (int32_t *) bias_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    float *weight_scales = filter_tensor->scale_list;
    float *requant_scales = (float *) malloc(out_num * sizeof(float));
    if (!requant_scales)
        return -1;
    for (int i = 0; i < out_num; i++)
        requant_scales[i] = (input_scale * weight_scales[i]) / output_scale;
    int out_num_8 = out_num & ~7;     /* channels handled by the 8-wide kernel */
    for (int i = 0; i < input_tensor->dims[0]; i++) {
        int8_t *cur_input = input + i * kernel_size;
        int8_t *cur_output = output + i * out_num;
        gemv1x8(cur_input, cur_output, weight, biases, requant_scales, kernel_size, 0, out_num_8, num_thread, cpu_affinity);
        if (out_num & 0x7)
            gemv1x2(cur_input, cur_output, weight, biases, requant_scales, kernel_size, out_num_8, out_num, num_thread, cpu_affinity);
    }
    free(requant_scales);   /* fix: original leaked this buffer on every call */
    return 0;
}
|
DES_bs_b.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2003,2010-2013,2015 by Solar Designer
*
* Addition of single DES encryption with no salt by
* Deepika Dutta Mishra <dipikadutta at gmail.com> in 2012, no
* rights reserved.
*/
#ifdef _MSC_VER
#undef _OPENMP
#endif
#include "arch.h"
#include "common.h"
#include "DES_bs.h"
#include "memdbg.h"
#if DES_BS_ASM && defined(_OPENMP) && defined(__GNUC__)
#warning Assembly code and OpenMP are both requested - will provide the former, but not the latter (for DES-based hashes). This may likely be corrected by enabling SIMD intrinsics with the C compiler (try adding -msse2 to OMPFLAGS).
#endif
#if !DES_BS_ASM
#define vzero (*(vtype *)&DES_bs_all.zero)
#if DES_bs_mt
#define vones (*(vtype *)&DES_bs_all_by_tnum(-1).ones)
#else
#define vones (*(vtype *)&DES_bs_all.ones)
#endif
#define DES_BS_VECTOR_LOOPS 0
#if defined(__ARM_NEON) && DES_BS_DEPTH == 64
#include <arm_neon.h>
typedef uint32x2_t vtype;
#define vst(dst, ofs, src) \
vst1_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
veor_u32((a), (b))
#define vnot(dst, a) \
(dst) = vmvn_u32((a))
#define vand(dst, a, b) \
(dst) = vand_u32((a), (b))
#define vor(dst, a, b) \
(dst) = vorr_u32((a), (b))
#define vandn(dst, a, b) \
(dst) = vbic_u32((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vbsl_u32((c), (b), (a))
#if 0
#define vshl1(dst, src) \
(dst) = vadd_u32((src), (src))
#endif
#define vshl(dst, src, shift) \
(dst) = vshl_n_u32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = vshr_n_u32((src), (shift))
#elif defined(__ARM_NEON) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <arm_neon.h>
typedef struct {
uint32x2_t f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = veor_u32((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vmvn_u32((a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vand_u32((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vorr_u32((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vbic_u32((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vbsl_u32((c).f, (b).f, (a).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ARM_NEON) && DES_BS_DEPTH == 128 && defined(DES_BS_2X64)
#include <arm_neon.h>
typedef struct {
uint32x2_t f, g;
} vtype;
#define vst(dst, ofs, src) \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = veor_u32((a).f, (b).f); \
(dst).g = veor_u32((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vmvn_u32((a).f); \
(dst).g = vmvn_u32((a).g)
#define vand(dst, a, b) \
(dst).f = vand_u32((a).f, (b).f); \
(dst).g = vand_u32((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vorr_u32((a).f, (b).f); \
(dst).g = vorr_u32((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vbic_u32((a).f, (b).f); \
(dst).g = vbic_u32((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vbsl_u32((c).f, (b).f, (a).f); \
(dst).g = vbsl_u32((c).g, (b).g, (a).g)
#elif defined(__ARM_NEON) && DES_BS_DEPTH == 128
#include <arm_neon.h>
typedef uint32x4_t vtype;
#define vst(dst, ofs, src) \
vst1q_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
veorq_u32((a), (b))
#define vnot(dst, a) \
(dst) = vmvnq_u32((a))
#define vand(dst, a, b) \
(dst) = vandq_u32((a), (b))
#define vor(dst, a, b) \
(dst) = vorrq_u32((a), (b))
#define vandn(dst, a, b) \
(dst) = vbicq_u32((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vbslq_u32((c), (b), (a))
#if 0
#define vshl1(dst, src) \
(dst) = vaddq_u32((src), (src))
#endif
#define vshl(dst, src, shift) \
(dst) = vshlq_n_u32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = vshrq_n_u32((src), (shift))
#elif defined(__ARM_NEON) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <arm_neon.h>
typedef struct {
uint32x4_t f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = veorq_u32((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vmvnq_u32((a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vandq_u32((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vorrq_u32((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vbicq_u32((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vbslq_u32((c).f, (b).f, (a).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ARM_NEON) && DES_BS_DEPTH == 256
#include <arm_neon.h>
typedef struct {
uint32x4_t f, g;
} vtype;
#define vst(dst, ofs, src) \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = veorq_u32((a).f, (b).f); \
(dst).g = veorq_u32((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vmvnq_u32((a).f); \
(dst).g = vmvnq_u32((a).g)
#define vand(dst, a, b) \
(dst).f = vandq_u32((a).f, (b).f); \
(dst).g = vandq_u32((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vorrq_u32((a).f, (b).f); \
(dst).g = vorrq_u32((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vbicq_u32((a).f, (b).f); \
(dst).g = vbicq_u32((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vbslq_u32((c).f, (b).f, (a).f); \
(dst).g = vbslq_u32((c).g, (b).g, (a).g)
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 128
#ifdef __linux__
#include <altivec.h>
#endif
typedef vector signed int vtype;
#define vst(dst, ofs, src) \
vec_st((src), (ofs) * sizeof(DES_bs_vector), (dst))
#define vxorf(a, b) \
vec_xor((a), (b))
#define vnot(dst, a) \
(dst) = vec_nor((a), (a))
#define vand(dst, a, b) \
(dst) = vec_and((a), (b))
#define vor(dst, a, b) \
(dst) = vec_or((a), (b))
#define vandn(dst, a, b) \
(dst) = vec_andc((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vec_sel((a), (b), (vector bool int)(c))
#elif defined(__ALTIVEC__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
vector signed int f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = vec_xor((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vec_nor((a).f, (a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vec_and((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vec_or((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vec_andc((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 256
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
vector signed int f, g;
} vtype;
#define vst(dst, ofs, src) \
vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \
vec_st((src).g, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->g)
#define vxor(dst, a, b) \
(dst).f = vec_xor((a).f, (b).f); \
(dst).g = vec_xor((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vec_nor((a).f, (a).f); \
(dst).g = vec_nor((a).g, (a).g)
#define vand(dst, a, b) \
(dst).f = vec_and((a).f, (b).f); \
(dst).g = vec_and((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vec_or((a).f, (b).f); \
(dst).g = vec_or((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vec_andc((a).f, (b).f); \
(dst).g = vec_andc((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
(dst).g = vec_sel((a).g, (b).g, (vector bool int)(c).g)
#elif defined(__MIC__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef __m512i vtype;
#define vst(dst, ofs, src) \
_mm512_store_epi32((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm512_xor_epi32((a), (b))
#define vand(dst, a, b) \
(dst) = _mm512_and_epi32((a), (b))
#define vor(dst, a, b) \
(dst) = _mm512_or_epi32((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm512_andnot_epi32((b), (a))
#define vshl1(dst, src) \
(dst) = _mm512_add_epi32((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm512_slli_epi32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm512_srli_epi32((src), (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 256 && !defined(DES_BS_NO_AVX256)
#include <immintrin.h>
typedef __m256i vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm256_xor_si256((a), (b))
#define vand(dst, a, b) \
(dst) = _mm256_and_si256((a), (b))
#define vor(dst, a, b) \
(dst) = _mm256_or_si256((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm256_andnot_si256((b), (a))
#ifdef __XOP__
/* This could be _mm256_cmov_si256(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst) = __builtin_ia32_vpcmov_v8sf256((b), (a), (c))
#endif
#define vshl1(dst, src) \
(dst) = _mm256_add_epi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm256_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm256_srli_epi64((src), (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 384 && !defined(DES_BS_NO_AVX128)
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
typedef struct {
__m256i f;
__m128i g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
(dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_epi64((src).g, (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef struct {
__m256i f, g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm256_xor_si256((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm256_and_si256((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm256_or_si256((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm256_andnot_si256((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
(dst).g = __builtin_ia32_vpcmov_v8sf256((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm256_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm256_srli_epi64((src).g, (shift))
#elif defined(__AVX__) && defined(__MMX__) && DES_BS_DEPTH == 320 && \
!defined(DES_BS_NO_MMX)
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
__m256i f;
__m64 g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift))
#elif defined(__AVX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 320) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 288))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
__m256i f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm256_xor_si256((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#elif defined(__AVX__) && defined(__MMX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 384) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 352))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
__m256i f;
__m64 g;
unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_si256((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g); \
(dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
(dst).f = _mm256_xor_si256((a).f, vones.f); \
(dst).g = _mm_xor_si64((a).g, vones.g); \
(dst).h = ~(a).h
#define vand(dst, a, b) \
(dst).f = _mm256_and_si256((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g); \
(dst).h = (a).h & (b).h
#define vor(dst, a, b) \
(dst).f = _mm256_or_si256((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g); \
(dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_si256((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g); \
(dst).h = (a).h & ~(b).h
#define vshl(dst, src, shift) \
(dst).f = _mm256_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift)); \
(dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm256_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift)); \
(dst).h = (src).h >> (shift)
#elif defined(__SSE2__) && DES_BS_DEPTH == 128
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef __m128i vtype;
#define vst(dst, ofs, src) \
_mm_store_si128((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm_xor_si128((a), (b))
#define vand(dst, a, b) \
(dst) = _mm_and_si128((a), (b))
#define vor(dst, a, b) \
(dst) = _mm_or_si128((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm_andnot_si128((b), (a))
#ifdef __XOP__
#define vsel(dst, a, b, c) \
(dst) = _mm_cmov_si128((b), (a), (c))
#else
#define vsel(dst, a, b, c) \
(dst) = _mm_xor_si128(_mm_andnot_si128((c), (a)), \
_mm_and_si128((c), (b)))
#endif
#define vshl1(dst, src) \
(dst) = _mm_add_epi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm_srli_epi64((src), (shift))
#elif defined(__SSE2__) && DES_BS_DEPTH == 256 && defined(DES_BS_NO_MMX)
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef struct {
__m128i f, g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
#define vsel(dst, a, b, c) \
(dst).f = _mm_cmov_si128((b).f, (a).f, (c).f); \
(dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_epi8((src).g, (src).g)
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_epi64((src).g, (shift))
#elif defined(__SSE2__) && defined(__MMX__) && DES_BS_DEPTH == 192 && \
!defined(DES_BS_NO_MMX)
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
__m128i f;
__m64 g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_pi8((src).g, (src).g)
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift))
#elif defined(__SSE2__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <emmintrin.h>
typedef struct {
__m128i f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm_xor_si128((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#elif defined(__SSE2__) && defined(__MMX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 256) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 224))
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
__m128i f;
__m64 g;
unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g); \
(dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
(dst).f = _mm_xor_si128((a).f, vones.f); \
(dst).g = _mm_xor_si64((a).g, vones.g); \
(dst).h = ~(a).h
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g); \
(dst).h = (a).h & (b).h
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g); \
(dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g); \
(dst).h = (a).h & ~(b).h
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_pi8((src).g, (src).g); \
(dst).h = (src).h << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift)); \
(dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift)); \
(dst).h = (src).h >> (shift)
#elif defined(__MMX__) && ARCH_BITS != 64 && DES_BS_DEPTH == 64
#include <mmintrin.h>
typedef __m64 vtype;
#define vxorf(a, b) \
_mm_xor_si64((a), (b))
#define vand(dst, a, b) \
(dst) = _mm_and_si64((a), (b))
#define vor(dst, a, b) \
(dst) = _mm_or_si64((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm_andnot_si64((b), (a))
#define vshl1(dst, src) \
(dst) = _mm_add_pi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm_slli_si64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm_srli_si64((src), (shift))
#elif defined(__MMX__) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <mmintrin.h>
typedef struct {
__m64 f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f = (src).f; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si64((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm_xor_si64((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm_and_si64((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm_or_si64((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si64((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
(dst).f = _mm_add_pi8((src).f, (src).f); \
(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_si64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_si64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#else
#if DES_BS_VECTOR
#undef DES_BS_VECTOR_LOOPS
#define DES_BS_VECTOR_LOOPS 1
#endif
typedef unsigned ARCH_WORD vtype;
#define vxorf(a, b) \
((a) ^ (b))
#define vnot(dst, a) \
(dst) = ~(a)
#define vand(dst, a, b) \
(dst) = (a) & (b)
#define vor(dst, a, b) \
(dst) = (a) | (b)
#define vandn(dst, a, b) \
(dst) = (a) & ~(b)
#define vsel(dst, a, b, c) \
(dst) = (((a) & ~(c)) ^ ((b) & (c)))
#define vshl(dst, src, shift) \
(dst) = (src) << (shift)
#define vshr(dst, src, shift) \
(dst) = (src) >> (shift)
/* Assume that 0 always fits in one load immediate instruction */
#undef vzero
#define vzero 0
/* Archs friendly to use of immediate values */
#if defined(__x86_64__) || defined(__i386__)
#undef vones
#define vones (~(vtype)0)
#endif
#endif
#ifndef vst
#define vst(dst, ofs, src) \
*((vtype *)((DES_bs_vector *)&(dst) + (ofs))) = (src)
#endif
#if !defined(vxor) && defined(vxorf)
#define vxor(dst, a, b) \
(dst) = vxorf((a), (b))
#endif
#if !defined(vxorf) && defined(vxor)
/*
* This requires gcc's "Statement Exprs" extension (also supported by a number
* of other C compilers).
*/
#define vxorf(a, b) \
({ vtype tmp; vxor(tmp, (a), (b)); tmp; })
#endif
#ifndef vnot
#define vnot(dst, a) \
vxor((dst), (a), vones)
#endif
#ifndef vshl1
#define vshl1(dst, src) \
vshl((dst), (src), 1)
#endif
#if !DES_BS_VECTOR_LOOPS && defined(vshl) && defined(vshr)
#define DES_BS_VECTOR_LOOPS_K 0
#define DEPTH_K
#define for_each_depth_k()
#define kvtype vtype
#define kvand vand
#define kvor vor
#define kvshl1 vshl1
#define kvshl vshl
#define kvshr vshr
#else
#if DES_BS_VECTOR
#define DES_BS_VECTOR_LOOPS_K 1
#define DEPTH_K [depth]
#define for_each_depth_k() \
for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#define DES_BS_VECTOR_LOOPS_K 0
#endif
typedef unsigned ARCH_WORD kvtype;
#define kvand(dst, a, b) \
(dst) = (a) & (b)
#define kvor(dst, a, b) \
(dst) = (a) | (b)
#define kvshl1(dst, src) \
(dst) = (src) << 1
#define kvshl(dst, src, shift) \
(dst) = (src) << (shift)
#define kvshr(dst, src, shift) \
(dst) = (src) >> (shift)
#endif
#if !DES_BS_VECTOR || DES_BS_VECTOR_LOOPS_K
#ifdef __x86_64__
#define mask01 0x0101010101010101UL
#elif __i386__
#define mask01 0x01010101UL
#else
#undef mask01
#endif
#ifdef mask01
#define mask02 (mask01 << 1)
#define mask04 (mask01 << 2)
#define mask08 (mask01 << 3)
#define mask10 (mask01 << 4)
#define mask20 (mask01 << 5)
#define mask40 (mask01 << 6)
#define mask80 (mask01 << 7)
#endif
#endif
#ifndef mask01
#define mask01 (*(kvtype *)&DES_bs_all.masks[0])
#define mask02 (*(kvtype *)&DES_bs_all.masks[1])
#define mask04 (*(kvtype *)&DES_bs_all.masks[2])
#define mask08 (*(kvtype *)&DES_bs_all.masks[3])
#define mask10 (*(kvtype *)&DES_bs_all.masks[4])
#define mask20 (*(kvtype *)&DES_bs_all.masks[5])
#define mask40 (*(kvtype *)&DES_bs_all.masks[6])
#define mask80 (*(kvtype *)&DES_bs_all.masks[7])
#endif
#ifdef __i386__
/* register-starved */
#define LOAD_V \
kvtype v0 = *(kvtype *)&vp[0]; \
kvtype v4 = *(kvtype *)&vp[4];
#define v1 *(kvtype *)&vp[1]
#define v2 *(kvtype *)&vp[2]
#define v3 *(kvtype *)&vp[3]
#define v5 *(kvtype *)&vp[5]
#define v6 *(kvtype *)&vp[6]
#define v7 *(kvtype *)&vp[7]
#else
#define LOAD_V \
kvtype v0 = *(kvtype *)&vp[0]; \
kvtype v1 = *(kvtype *)&vp[1]; \
kvtype v2 = *(kvtype *)&vp[2]; \
kvtype v3 = *(kvtype *)&vp[3]; \
kvtype v4 = *(kvtype *)&vp[4]; \
kvtype v5 = *(kvtype *)&vp[5]; \
kvtype v6 = *(kvtype *)&vp[6]; \
kvtype v7 = *(kvtype *)&vp[7];
#endif
#define kvand_shl1_or(dst, src, mask) \
kvand(tmp, src, mask); \
kvshl1(tmp, tmp); \
kvor(dst, dst, tmp)
#define kvand_shl_or(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshl(tmp, tmp, shift); \
kvor(dst, dst, tmp)
#define kvand_shl1(dst, src, mask) \
kvand(tmp, src, mask); \
kvshl1(dst, tmp)
#define kvand_or(dst, src, mask) \
kvand(tmp, src, mask); \
kvor(dst, dst, tmp)
#define kvand_shr_or(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshr(tmp, tmp, shift); \
kvor(dst, dst, tmp)
#define kvand_shr(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshr(dst, tmp, shift)
#define FINALIZE_NEXT_KEY_BIT_0 { \
kvtype m = mask01, va, vb, tmp; \
kvand(va, v0, m); \
kvand_shl1(vb, v1, m); \
kvand_shl_or(va, v2, m, 2); \
kvand_shl_or(vb, v3, m, 3); \
kvand_shl_or(va, v4, m, 4); \
kvand_shl_or(vb, v5, m, 5); \
kvand_shl_or(va, v6, m, 6); \
kvand_shl_or(vb, v7, m, 7); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/*
 * FINALIZE_NEXT_KEY_BIT_n: gather bit n of each of the eight source
 * vectors v0..v7 (one vector per key character) into the next bitslice
 * key word *kp, then advance kp.  The maskNN constant selects bit n;
 * each kvand_sh* step extracts that bit from one source vector and
 * shifts it into the lane matching the vector's index, accumulating
 * into the two partial results va and vb, which are OR-ed together at
 * the end.  v0..v7, kp and the maskNN constants must be in scope at
 * the expansion site (see LOAD_V and the DES_bs_finalize_keys* users).
 */
/* bit 1 of v0..v7 -> *kp++ */
#define FINALIZE_NEXT_KEY_BIT_1 { \
kvtype m = mask02, va, vb, tmp; \
kvand_shr(va, v0, m, 1); \
kvand(vb, v1, m); \
kvand_shl1_or(va, v2, m); \
kvand_shl_or(vb, v3, m, 2); \
kvand_shl_or(va, v4, m, 3); \
kvand_shl_or(vb, v5, m, 4); \
kvand_shl_or(va, v6, m, 5); \
kvand_shl_or(vb, v7, m, 6); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/* bit 2 of v0..v7 -> *kp++ */
#define FINALIZE_NEXT_KEY_BIT_2 { \
kvtype m = mask04, va, vb, tmp; \
kvand_shr(va, v0, m, 2); \
kvand_shr(vb, v1, m, 1); \
kvand_or(va, v2, m); \
kvand_shl1_or(vb, v3, m); \
kvand_shl_or(va, v4, m, 2); \
kvand_shl_or(vb, v5, m, 3); \
kvand_shl_or(va, v6, m, 4); \
kvand_shl_or(vb, v7, m, 5); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/* bit 3 of v0..v7 -> *kp++ */
#define FINALIZE_NEXT_KEY_BIT_3 { \
kvtype m = mask08, va, vb, tmp; \
kvand_shr(va, v0, m, 3); \
kvand_shr(vb, v1, m, 2); \
kvand_shr_or(va, v2, m, 1); \
kvand_or(vb, v3, m); \
kvand_shl1_or(va, v4, m); \
kvand_shl_or(vb, v5, m, 2); \
kvand_shl_or(va, v6, m, 3); \
kvand_shl_or(vb, v7, m, 4); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/* bit 4 of v0..v7 -> *kp++ */
#define FINALIZE_NEXT_KEY_BIT_4 { \
kvtype m = mask10, va, vb, tmp; \
kvand_shr(va, v0, m, 4); \
kvand_shr(vb, v1, m, 3); \
kvand_shr_or(va, v2, m, 2); \
kvand_shr_or(vb, v3, m, 1); \
kvand_or(va, v4, m); \
kvand_shl1_or(vb, v5, m); \
kvand_shl_or(va, v6, m, 2); \
kvand_shl_or(vb, v7, m, 3); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/* bit 5 of v0..v7 -> *kp++ */
#define FINALIZE_NEXT_KEY_BIT_5 { \
kvtype m = mask20, va, vb, tmp; \
kvand_shr(va, v0, m, 5); \
kvand_shr(vb, v1, m, 4); \
kvand_shr_or(va, v2, m, 3); \
kvand_shr_or(vb, v3, m, 2); \
kvand_shr_or(va, v4, m, 1); \
kvand_or(vb, v5, m); \
kvand_shl1_or(va, v6, m); \
kvand_shl_or(vb, v7, m, 2); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/* bit 6 of v0..v7 -> *kp++ */
#define FINALIZE_NEXT_KEY_BIT_6 { \
kvtype m = mask40, va, vb, tmp; \
kvand_shr(va, v0, m, 6); \
kvand_shr(vb, v1, m, 5); \
kvand_shr_or(va, v2, m, 4); \
kvand_shr_or(vb, v3, m, 3); \
kvand_shr_or(va, v4, m, 2); \
kvand_shr_or(vb, v5, m, 1); \
kvand_or(va, v6, m); \
kvand_shl1_or(vb, v7, m); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
/* bit 7 of v0..v7 -> *kp++ (only used by the LM variant; DES proper
 * ignores this bit of each key character) */
#define FINALIZE_NEXT_KEY_BIT_7 { \
kvtype m = mask80, va, vb, tmp; \
kvand_shr(va, v0, m, 7); \
kvand_shr(vb, v1, m, 6); \
kvand_shr_or(va, v2, m, 5); \
kvand_shr_or(vb, v3, m, 4); \
kvand_shr_or(va, v4, m, 3); \
kvand_shr_or(vb, v5, m, 2); \
kvand_shr_or(va, v6, m, 1); \
kvand_or(vb, v7, m); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys(void)
#endif
/*
 * Transpose the raw per-candidate key characters (DES_bs_all.xkeys) into
 * bitslice layout (DES_bs_all.K): for each of the 8 key characters, bits
 * 0..6 are gathered across candidates (bit 7 is skipped, matching DES's
 * use of only 7 bits per key byte — note FINALIZE_NEXT_KEY_BIT_7 is not
 * invoked here).  With DES_BS_EXPAND, the pointer-based key schedule KSp
 * is then materialized into the value-based KS.v.
 */
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 8; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
}
}
#if DES_BS_EXPAND
{
/* Expand the key schedule: copy the values the KSp pointers refer to
 * into the flat KS.v array so the crypt loops can index it directly. */
int index;
for (index = 0; index < 0x300; index++)
for_each_depth_k() {
#if DES_BS_VECTOR_LOOPS_K
DES_bs_all.KS.v[index] DEPTH_K =
DES_bs_all.KSp[index] DEPTH_K;
#else
vst(*(kvtype *)&DES_bs_all.KS.v[index], 0,
*(kvtype *)DES_bs_all.KSp[index]);
#endif
}
}
#endif
}
#endif
#if DES_bs_mt
MAYBE_INLINE void DES_bs_set_salt_for_thread(int t, unsigned int salt)
#else
void DES_bs_set_salt(ARCH_WORD salt)
#endif
/*
 * Apply a 12-bit crypt(3) salt by updating the E expansion pointer table.
 * Each salt bit that differs from the previously applied salt swaps a pair
 * of expansion entries (src1/src2); only changed bits are touched, and the
 * loop exits early once the remaining bits of new and old agree.
 */
{
unsigned int new = salt;
unsigned int old = DES_bs_all.salt;
int dst;
DES_bs_all.salt = new;
for (dst = 0; dst < 24; dst++) {
if ((new ^ old) & 1) {
DES_bs_vector *sp1, *sp2;
int src1 = dst;
int src2 = dst + 24;
if (new & 1) {
/* Salt bit set: exchange the two expansion sources. */
src1 = src2;
src2 = dst;
}
sp1 = DES_bs_all.Ens[src1];
sp2 = DES_bs_all.Ens[src2];
DES_bs_all.E.E[dst] = (ARCH_WORD *)sp1;
DES_bs_all.E.E[dst + 24] = (ARCH_WORD *)sp2;
DES_bs_all.E.E[dst + 48] = (ARCH_WORD *)(sp1 + 32);
DES_bs_all.E.E[dst + 72] = (ARCH_WORD *)(sp2 + 32);
}
new >>= 1;
old >>= 1;
/* All remaining bits identical: nothing more to update. */
if (new == old)
break;
}
}
#if !DES_BS_ASM
/* Include the S-boxes here so that the compiler can inline them */
#if DES_BS == 3
#include "sboxes-s.c"
#elif DES_BS == 2
#include "sboxes.c"
#else
#undef andn
#include "nonstd.c"
#endif
/* Short aliases for the bitslice ciphertext block and expansion table. */
#define b DES_bs_all.B
#define e DES_bs_all.E.E
/* Depth subscripts: with vector loops, every access is indexed by the
 * current depth slice; otherwise the subscripts collapse to a fixed
 * element (or nothing) and for_each_depth() becomes a no-op. */
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#define bd [depth]
#define ed [depth]
#define DEPTH [depth]
#define for_each_depth() \
for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#if DES_BS_EXPAND
#define kd
#else
#define kd [0]
#endif
#define bd
#define ed [0]
#define DEPTH
#define for_each_depth()
#endif
/* Zero 8 consecutive bitslice block words starting at index i. */
#define DES_bs_clear_block_8(i) \
for_each_depth() { \
vst(b[i] bd, 0, zero); \
vst(b[i] bd, 1, zero); \
vst(b[i] bd, 2, zero); \
vst(b[i] bd, 3, zero); \
vst(b[i] bd, 4, zero); \
vst(b[i] bd, 5, zero); \
vst(b[i] bd, 6, zero); \
vst(b[i] bd, 7, zero); \
}
/* Zero the whole 64-bit bitslice block. */
#define DES_bs_clear_block \
DES_bs_clear_block_8(0); \
DES_bs_clear_block_8(8); \
DES_bs_clear_block_8(16); \
DES_bs_clear_block_8(24); \
DES_bs_clear_block_8(32); \
DES_bs_clear_block_8(40); \
DES_bs_clear_block_8(48); \
DES_bs_clear_block_8(56);
/* Store the given 8 vectors into block words i..i+7. */
#define DES_bs_set_block_8(i, v0, v1, v2, v3, v4, v5, v6, v7) \
for_each_depth() { \
vst(b[i] bd, 0, v0); \
vst(b[i] bd, 1, v1); \
vst(b[i] bd, 2, v2); \
vst(b[i] bd, 3, v3); \
vst(b[i] bd, 4, v4); \
vst(b[i] bd, 5, v5); \
vst(b[i] bd, 6, v6); \
vst(b[i] bd, 7, v7); \
}
/* S-box inputs: x() XORs an expansion entry with a key word; y() XORs a
 * block word directly with a key word; z() names an output block word. */
#define x(p) vxorf(*(vtype *)&e[p] ed, *(vtype *)&k[p] kd)
#define y(p, q) vxorf(*(vtype *)&b[p] bd, *(vtype *)&k[q] kd)
#define z(r) ((vtype *)&b[r] bd)
/*
 * Bitslice DES, 25 iterations: the traditional crypt(3) hash computation.
 * Processes keys_count candidate keys in parallel bit layers (and across
 * threads when DES_bs_mt).  The round structure is driven by gotos:
 * "start" runs 8 round pairs against one half of the key schedule,
 * "swap" the other half; rounds_and_swapped/iterations count progress.
 */
void DES_bs_crypt_25(int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
for_each_t(n) {
#if DES_BS_EXPAND
DES_bs_vector *k;
#else
ARCH_WORD **k;
#endif
int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
/* Re-transpose keys into bitslice layout if they changed. */
if (DES_bs_all.keys_changed)
goto finalize_keys;
body:
#if DES_bs_mt
DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif
{
/* All-zero plaintext block, as crypt(3) specifies. */
vtype zero = vzero;
DES_bs_clear_block
}
#if DES_BS_EXPAND
k = DES_bs_all.KS.v;
#else
k = DES_bs_all.KS.p;
#endif
rounds_and_swapped = 8;
iterations = 25;
start:
/* One round on the first block half.  s3/s4/s7/s8 use y() (block word
 * XOR key) where the expansion entries coincide with block words. */
for_each_depth()
s1(x(0), x(1), x(2), x(3), x(4), x(5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(x(6), x(7), x(8), x(9), x(10), x(11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(x(24), x(25), x(26), x(27), x(28), x(29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(x(30), x(31), x(32), x(33), x(34), x(35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
/* 0x100 flags "second half already swapped": skip the swap round. */
if (rounds_and_swapped == 0x100) goto next;
swap:
/* One round on the second (swapped) block half. */
for_each_depth()
s1(x(48), x(49), x(50), x(51), x(52), x(53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(x(54), x(55), x(56), x(57), x(58), x(59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(x(72), x(73), x(74), x(75), x(76), x(77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(x(78), x(79), x(80), x(81), x(82), x(83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
if (--rounds_and_swapped) goto start;
/* Finished 8 round pairs: rewind the key schedule and flip halves. */
k -= (0x300 + 48);
rounds_and_swapped = 0x108;
if (--iterations) goto swap;
#if DES_bs_mt
continue;
#else
return;
#endif
next:
k -= (0x300 - 48);
rounds_and_swapped = 8;
iterations--;
goto start;
finalize_keys:
DES_bs_all.keys_changed = 0;
#if DES_bs_mt
DES_bs_finalize_keys(t);
#else
DES_bs_finalize_keys();
#endif
goto body;
}
}
/*
 * Bitslice DES with a caller-chosen iteration count (count) — same round
 * machinery as DES_bs_crypt_25, but the first-half S-box inputs all come
 * from the expansion table (x()), and the iteration counter is variable.
 */
void DES_bs_crypt(int count, int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count)
#endif
for_each_t(n) {
#if DES_BS_EXPAND
DES_bs_vector *k;
#else
ARCH_WORD **k;
#endif
int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
/* Re-transpose keys into bitslice layout if they changed. */
if (DES_bs_all.keys_changed)
goto finalize_keys;
body:
#if DES_bs_mt
DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif
{
vtype zero = vzero;
DES_bs_clear_block
}
#if DES_BS_EXPAND
k = DES_bs_all.KS.v;
#else
k = DES_bs_all.KS.p;
#endif
rounds_and_swapped = 8;
iterations = count;
start:
/* One round on the first block half. */
for_each_depth()
s1(x(0), x(1), x(2), x(3), x(4), x(5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(x(6), x(7), x(8), x(9), x(10), x(11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(x(12), x(13), x(14), x(15), x(16), x(17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(x(18), x(19), x(20), x(21), x(22), x(23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(x(24), x(25), x(26), x(27), x(28), x(29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(x(30), x(31), x(32), x(33), x(34), x(35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(x(36), x(37), x(38), x(39), x(40), x(41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(x(42), x(43), x(44), x(45), x(46), x(47),
z(36), z(58), z(46), z(52));
/* 0x100 flags "second half already swapped": skip the swap round. */
if (rounds_and_swapped == 0x100) goto next;
swap:
/* One round on the second (swapped) block half. */
for_each_depth()
s1(x(48), x(49), x(50), x(51), x(52), x(53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(x(54), x(55), x(56), x(57), x(58), x(59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(x(60), x(61), x(62), x(63), x(64), x(65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(x(66), x(67), x(68), x(69), x(70), x(71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(x(72), x(73), x(74), x(75), x(76), x(77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(x(78), x(79), x(80), x(81), x(82), x(83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(x(84), x(85), x(86), x(87), x(88), x(89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(x(90), x(91), x(92), x(93), x(94), x(95),
z(4), z(26), z(14), z(20));
k += 96;
if (--rounds_and_swapped) goto start;
/* Finished 8 round pairs: rewind the key schedule and flip halves. */
k -= (0x300 + 48);
rounds_and_swapped = 0x108;
if (--iterations) goto swap;
#if DES_bs_mt
continue;
#else
return;
#endif
next:
k -= (0x300 - 48);
rounds_and_swapped = 8;
if (--iterations) goto start;
#if DES_bs_mt
continue;
#else
return;
#endif
finalize_keys:
DES_bs_all.keys_changed = 0;
#if DES_bs_mt
DES_bs_finalize_keys(t);
#else
DES_bs_finalize_keys();
#endif
goto body;
}
}
#undef x
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_LM(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_LM(void)
#endif
/*
 * LM (LanMan) hash key transposition: unlike DES proper, LM keys use 7
 * characters of 8 bits each, so all of FINALIZE_NEXT_KEY_BIT_0..7 are
 * applied and the character loop runs to 7 instead of 8.
 */
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 7; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
FINALIZE_NEXT_KEY_BIT_7
}
}
}
/* Key subscript for the LM crypt loop: KS.p is used directly (no
 * DES_BS_EXPAND variant), so kd always selects an element. */
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif
/*
 * Bitslice LM hash: a single (16-round) DES encryption of the constant
 * plaintext "KGS!@#$%", whose bit pattern is loaded into the block via
 * the DES_bs_set_block_8 calls below.  No salt, no iteration loop —
 * exactly 8 passes over the 96-entry key schedule window.
 * Returns the (unchanged) candidate count.
 */
int DES_bs_crypt_LM(int *pcount, struct db_salt *salt)
{
int keys_count = *pcount;
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
for_each_t(n) {
ARCH_WORD **k;
int rounds;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
{
/* Constant LM plaintext, bitsliced: each vector is all-zeroes or
 * all-ones according to the corresponding plaintext bit. */
vtype z = vzero, o = vones;
DES_bs_set_block_8(0, z, z, z, z, z, z, z, z);
DES_bs_set_block_8(8, o, o, o, z, o, z, z, z);
DES_bs_set_block_8(16, z, z, z, z, z, z, z, o);
DES_bs_set_block_8(24, z, z, o, z, z, o, o, o);
DES_bs_set_block_8(32, z, z, z, o, z, o, o, o);
DES_bs_set_block_8(40, z, z, z, z, z, o, z, z);
DES_bs_set_block_8(48, o, o, z, z, z, z, o, z);
DES_bs_set_block_8(56, o, z, o, z, o, o, o, o);
}
#if DES_bs_mt
DES_bs_finalize_keys_LM(t);
#else
DES_bs_finalize_keys_LM();
#endif
k = DES_bs_all.KS.p;
rounds = 8;
do {
/* Two DES rounds per pass (both block halves), 8 passes total.
 * All S-box inputs come from block words XOR key words (y()). */
for_each_depth()
s1(y(31, 0), y(0, 1), y(1, 2),
y(2, 3), y(3, 4), y(4, 5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(y(3, 6), y(4, 7), y(5, 8),
y(6, 9), y(7, 10), y(8, 11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(y(15, 24), y(16, 25), y(17, 26),
y(18, 27), y(19, 28), y(20, 29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(y(19, 30), y(20, 31), y(21, 32),
y(22, 33), y(23, 34), y(24, 35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
for_each_depth()
s1(y(63, 48), y(32, 49), y(33, 50),
y(34, 51), y(35, 52), y(36, 53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(y(35, 54), y(36, 55), y(37, 56),
y(38, 57), y(39, 58), y(40, 59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(y(47, 72), y(48, 73), y(49, 74),
y(50, 75), y(51, 76), y(52, 77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(y(51, 78), y(52, 79), y(53, 80),
y(54, 81), y(55, 82), y(56, 83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
} while (--rounds);
}
return keys_count;
}
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_plain(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_plain(void)
#endif
/*
 * Key transposition for the plain (single-DES, no salt) code path:
 * identical to DES_bs_finalize_keys (8 characters, bits 0..6 each) but
 * without the DES_BS_EXPAND key-schedule copy.
 */
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 8; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
}
}
}
/* Drop the per-bit helper macros' source-vector names. */
#undef v1
#undef v2
#undef v3
#undef v5
#undef v6
#undef v7
/* Single Des Encryption with no salt */
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif
/* INDX: per-lane subscript used when copying DES_bs_P into the block. */
#if DES_BS_VECTOR
#define INDX [index]
#else
#define INDX
#endif
/*
 * Single bitslice DES encryption, no salt: the plaintext previously set
 * via DES_bs_generate_plaintext (global DES_bs_P) is copied into the
 * block, keys are transposed, and 8 passes of two rounds each are run —
 * same round body as DES_bs_crypt_LM.
 */
void DES_bs_crypt_plain(int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count, DES_bs_P)
#endif
for_each_t(n) {
ARCH_WORD **k;
int rounds;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
int i;
#if DES_BS_VECTOR
int index;
#endif
/* Load the prepared bitslice plaintext into the block. */
for (i=0; i<64; i++)
{
#if DES_BS_VECTOR
for (index=0; index<DES_BS_VECTOR_SIZE; index++)
#endif
DES_bs_all.B[i]INDX = DES_bs_P[i]INDX;
}
#if DES_bs_mt
DES_bs_finalize_keys_plain(t);
#else
DES_bs_finalize_keys_plain();
#endif
k = DES_bs_all.KS.p;
rounds = 8;
do {
/* Two DES rounds per pass (both block halves), 8 passes total. */
for_each_depth()
s1(y(31, 0), y(0, 1), y(1, 2),
y(2, 3), y(3, 4), y(4, 5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(y(3, 6), y(4, 7), y(5, 8),
y(6, 9), y(7, 10), y(8, 11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(y(15, 24), y(16, 25), y(17, 26),
y(18, 27), y(19, 28), y(20, 29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(y(19, 30), y(20, 31), y(21, 32),
y(22, 33), y(23, 34), y(24, 35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
for_each_depth()
s1(y(63, 48), y(32, 49), y(33, 50),
y(34, 51), y(35, 52), y(36, 53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(y(35, 54), y(36, 55), y(37, 56),
y(38, 57), y(39, 58), y(40, 59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(y(47, 72), y(48, 73), y(49, 74),
y(50, 75), y(51, 76), y(52, 77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(y(51, 78), y(52, 79), y(53, 80),
y(54, 81), y(55, 82), y(56, 83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
} while (--rounds);
}}
#endif
#ifdef INDX
#undef INDX
#endif
/* INDX: per-lane subscript for DES_bs_P stores below; expands to [k]
 * (the caller's lane loop variable) when vectorized, else to nothing. */
#if DES_BS_VECTOR
#define INDX [k]
#else
#define INDX
#endif
/*
 * Broadcast a 64-bit plaintext block into the bitslice plaintext buffer:
 * bit i of the input (MSB-first within each byte) becomes DES_bs_P[i],
 * replicated across every vector lane — all-ones (-1) for a set bit,
 * zero for a clear bit.
 */
void DES_bs_generate_plaintext(unsigned char *plaintext)
{
	int bit, value;
#if DES_BS_VECTOR
	int k; /* lane index; referenced by the INDX macro */
#endif
	for (bit = 0; bit < 64; bit++) {
		/* MSB-first bit extraction, then widen 1 -> all-ones. */
		value = ((int)(plaintext[bit/8] >> (7-(bit%8))) & 0x01) ? -1 : 0;
#if DES_BS_VECTOR
		for (k=0; k<DES_BS_VECTOR_SIZE; k++)
#endif
			DES_bs_P[bit]INDX = value;
	}
}
|
taskloop_tied_scheduling.c | // RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include "bolt_scheduling_util.h"
/* Verify that tied tasks created by a taskloop can be scheduled across
 * execution streams: 6 taskloop tasks plus 2 blocked threads must all
 * reach the same 4-party timeout barrier, which only succeeds if the
 * runtime migrates/schedules the tasks rather than serializing them.
 * Returns 1 on success, 0 on failure. */
int test_taskloop_tied_scheduling() {
int i, vals[6];
memset(vals, 0, sizeof(int) * 6);
timeout_barrier_t barrier;
timeout_barrier_init(&barrier);
#pragma omp parallel num_threads(4)
{
/* Threads 2 and 3 park themselves on the barrier so the taskloop
 * tasks must supply the remaining barrier arrivals. */
if (omp_get_thread_num() >= 2) {
timeout_barrier_wait(&barrier, 4);
}
// 6 barrier_waits in tasks and 2 barrier_waits in threads
#pragma omp master
{
check_num_ess(4);
#pragma omp taskloop grainsize(1)
for (i = 0; i < 6; i++) {
timeout_barrier_wait(&barrier, 4);
vals[i] = 1;
}
}
}
/* Every task must have run to completion. */
for (i = 0; i < 6; i++) {
if (vals[i] != 1) {
printf("vals[%d] == %d\n", i, vals[i]);
return 0;
}
}
return 1;
}
/* Driver: repeat the test REPETITIONS times; the exit status is the
 * number of failed repetitions (0 == all passed). */
int main() {
  int iter;
  int failures = 0;
  for (iter = 0; iter < REPETITIONS; iter++) {
    failures += !test_taskloop_tied_scheduling();
  }
  return failures;
}
|
SpatialZeroPadding.c | #include <string.h>
#include "../thnets.h"
int nnload_SpatialZeroPadding(struct module *mod, struct nnmodule *n)
{
struct table *t = n->table;
mod->type = MT_SpatialZeroPadding;
mod->updateOutput = nn_SpatialZeroPadding_updateOutput;
struct SpatialZeroPadding *m = &mod->SpatialZeroPadding;
m->pad_l = TableGetNumber(t, "pad_l");
m->pad_r = TableGetNumber(t, "pad_r");
m->pad_t = TableGetNumber(t, "pad_t");
m->pad_b = TableGetNumber(t, "pad_b");
return 0;
}
/* Zero-pad (or crop, for negative pad values) the two innermost spatial
 * dimensions of a 3D (planes x h x w) or 4D (batch x planes x h x w)
 * tensor.  Returns module->output, resized to the padded shape. */
THFloatTensor *nn_SpatialZeroPadding_updateOutput(struct module *module, THFloatTensor *input)
{
	int idim = input->nDimension;
	if(idim != 3 && idim != 4)
		THError("input dimension must be 3 or 4");
	int pad_l = module->SpatialZeroPadding.pad_l;
	int pad_r = module->SpatialZeroPadding.pad_r;
	int pad_t = module->SpatialZeroPadding.pad_t;
	int pad_b = module->SpatialZeroPadding.pad_b;
	int iw = (int)input->size[idim-1];
	int ih = (int)input->size[idim-2];
	int ow = iw + pad_l + pad_r;
	int oh = ih + pad_t + pad_b;
	/* Negative pads crop: [ix1,ix2) x [iy1,iy2) is the surviving region. */
	int ix1 = pad_l < 0 ? -pad_l : 0;
	int iy1 = pad_t < 0 ? -pad_t : 0;
	int ix2 = pad_r < 0 ? iw + pad_r : iw;
	int iy2 = pad_b < 0 ? ih + pad_b : ih;
	if(idim == 3)
		THFloatTensor_resize3d(module->output, input->size[0], oh, ow);
	else THFloatTensor_resize4d(module->output, input->size[0], input->size[1], oh, ow);
	int batchsize = idim == 4 ? (int)input->size[0] : 1;
	int batch, plane, y;
	/* BUG FIX: the input's row stride is its height-dimension stride
	 * (stride[idim-2]); the old code used size[idim-2] (the height),
	 * which is wrong for any non-square input. */
	int istride = (int)input->stride[idim-2];
	/* BUG FIX: plane and y are mutated by every thread and must be
	 * private, otherwise the inner loops race when batchsize > 1. */
#pragma omp parallel for private(batch, plane, y)
	for(batch = 0; batch < batchsize; batch++)
		for(plane = 0; plane < input->size[idim - 3]; plane++)
		{
			float *in = THFloatTensor_data(input) + batch * input->stride[0] + plane * input->stride[idim-3];
			float *out = THFloatTensor_data(module->output) + batch * module->output->stride[0] + plane * module->output->stride[idim-3];
			/* Zero full top/bottom pad rows. */
			if(pad_t > 0)
				memset(out, 0, ow * pad_t * sizeof(*out));
			if(pad_b > 0)
				memset(out + (pad_t + ih) * ow, 0, ow * pad_b * sizeof(*out));
			for(y = iy1; y < iy2; y++)
			{
				if(pad_l > 0)
					memset(out + (y + pad_t) * ow, 0, pad_l * sizeof(*out));
				/* BUG FIX: right padding starts after the copied row
				 * (offset pad_l + iw == ow - pad_r); the old offset
				 * pad_l + ow wrote past the end of the row. */
				if(pad_r > 0)
					memset(out + (y + pad_t) * ow + pad_l + iw, 0, pad_r * sizeof(*out));
				memcpy(out + (y + pad_t) * ow + (pad_l < 0 ? 0 : pad_l), in + y * istride + ix1, (ix2-ix1) * sizeof(*out));
			}
		}
	return module->output;
}
|
SpatialConvolutionMM.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c"
#else
/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
/* col2im accumulation: add each (plane, kernel-row, kernel-col) slab of the
 * unfolded buffer finput back into the corresponding shifted window of
 * input.  Parallelized over planes only, because windows of the same plane
 * overlap and writing them concurrently would race (see the note above). */
static void nn_(unfolded_acc)(THTensor *finput, THTensor *input,
int kW, int kH,
int nInputPlane,
int inputWidth, int inputHeight,
int outputWidth, int outputHeight)
{
int nip;
real *input_data = THTensor_(data)(input);
real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(nip)
for(nip = 0; nip < nInputPlane; nip++)
{
int kw, kh, y;
for(kh = 0; kh < kH; kh++)
{
for(kw = 0; kw < kW; kw++)
{
/* src walks the unfolded slab, dst the (kh,kw)-shifted image. */
real *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
real *dst = input_data + nip*(inputHeight*inputWidth) + kh*inputWidth + kw;
for(y = 0; y < outputHeight; y++)
THVector_(add)(dst+y*inputWidth, src+y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */
}
}
}
}
/* im2col: for every (input plane, kernel row, kernel col) triple, copy the
 * outputHeight x outputWidth window of the input image starting at that
 * kernel offset into the unfolded buffer finput, one row at a time.  Each
 * index k maps to an independent destination slab, so the loop is safely
 * parallel. */
static void nn_(unfolded_copy)(THTensor *finput, THTensor *input,
                               int kW, int kH,
                               int nInputPlane,
                               int inputWidth, int inputHeight,
                               int outputWidth, int outputHeight)
{
  long k;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane*kH*kW; k++)
  {
    /* Decompose the flat index into (plane, kernel row, kernel col). */
    const int plane = k / (kH*kW);
    const int within = k % (kH*kW);
    const int krow = within / kW;
    const int kcol = within % kW;
    real *dst = finput_data
      + plane*(kH*kW*outputHeight*outputWidth)
      + krow*(kW*outputHeight*outputWidth)
      + kcol*(outputHeight*outputWidth);
    real *src = input_data + plane*(inputHeight*inputWidth) + krow*inputWidth + kcol;
    int row;
    for(row = 0; row < outputHeight; row++)
      memcpy(dst + row*outputWidth, src + row*inputWidth, sizeof(real)*outputWidth);
  }
}
/* Forward pass for one frame: unfold the input (im2col), fill each output
 * plane with its bias, then compute output2d += weight * finput as a
 * single matrix multiply over the 2D view of the output. */
static void nn_(SpatialConvolutionMM_updateOutput_frame)(THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput,
int kW, int kH,
long nInputPlane, long inputWidth, long inputHeight,
long nOutputPlane, long outputWidth, long outputHeight)
{
long i;
nn_(unfolded_copy)(finput, input, kW, kH, nInputPlane, inputWidth, inputHeight, outputWidth, outputHeight);
/* 2D view sharing the output's storage: nOutputPlane x (oh*ow). */
THTensor *output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset,
nOutputPlane, -1,
outputHeight*outputWidth, -1);
for(i = 0; i < nOutputPlane; i++)
THVector_(fill)(output->storage->data+output->storageOffset+output->stride[0]*i, THTensor_(get1d)(bias, i), outputHeight*outputWidth);
THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);
THTensor_(free)(output2d);
}
/* Lua entry point: forward pass of SpatialConvolutionMM (unpadded,
 * stride-1 convolution via im2col + matrix multiply).  Accepts a 3D
 * input or a 4D batch; pushes nothing, returns 1 (the output tensor is
 * a field of the module table). */
static int nn_(SpatialConvolutionMM_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");
int dimf = 0;
int dimw = 2;
int dimh = 1;
if (input->nDimension == 4) {
dimf++;
dimw++;
dimh++;
}
long nInputPlane = input->size[dimf];
long inputWidth = input->size[dimw];
long inputHeight = input->size[dimh];
long nOutputPlane = weight->size[0];
/* No padding, unit stride: "valid" convolution output size. */
long outputWidth = (inputWidth - kW) + 1;
long outputHeight = (inputHeight - kH) + 1;
if(input->nDimension == 3)
{
THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
nn_(SpatialConvolutionMM_updateOutput_frame)(input, output, weight, bias, finput,
kW, kH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
else
{
long T = input->size[0];
long t;
THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);
/* NOTE(review): refcounting is disabled around the parallel loop,
 * presumably so the newSelect/free pairs in each thread don't hit
 * the shared storage refcounts concurrently — confirm this is still
 * required with the TH version in use. */
THStorage_(clearFlag)(input->storage, TH_STORAGE_REFCOUNTED);
THStorage_(clearFlag)(output->storage, TH_STORAGE_REFCOUNTED);
THStorage_(clearFlag)(finput->storage, TH_STORAGE_REFCOUNTED);
// mkl_set_num_threads(1);
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
nn_(SpatialConvolutionMM_updateOutput_frame)(input_t, output_t, weight, bias, finput_t,
kW, kH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
THTensor_(free)(finput_t);
}
THStorage_(setFlag)(input->storage, TH_STORAGE_REFCOUNTED);
THStorage_(setFlag)(output->storage, TH_STORAGE_REFCOUNTED);
THStorage_(setFlag)(finput->storage, TH_STORAGE_REFCOUNTED);
}
// mkl_set_num_threads(4);
return 1;
}
/* Backward (input gradient) for one frame: fgradInput = weight^T *
 * gradOutput2d (the caller passes weight already transposed), then fold
 * the unfolded gradient back into gradInput (col2im accumulation). */
static void nn_(SpatialConvolutionMM_updateGradInput_frame)(THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput,
int kW, int kH)
{
/* 2D view sharing gradOutput's storage: planes x (oh*ow). */
THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2], -1);
THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
THTensor_(free)(gradOutput2d);
THTensor_(zero)(gradInput);
nn_(unfolded_acc)(fgradInput, gradInput, kW, kH, gradInput->size[0], gradInput->size[2], gradInput->size[1], gradOutput->size[2], gradOutput->size[1]);
}
/* Lua entry point: backward pass computing gradInput.  The weight tensor
 * is transposed in place for the matrix multiplies and transposed back
 * before returning. */
static int nn_(SpatialConvolutionMM_updateGradInput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
THTensor *fgradInput = luaT_getfieldcheckudata(L, 1, "fgradInput", torch_Tensor);
THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );
THTensor_(resizeAs)(gradInput, input);
THTensor_(resizeAs)(fgradInput, finput);
/* Transpose once for all frames; restored below. */
THTensor_(transpose)(weight, weight, 0, 1);
if(input->nDimension == 3)
{
nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput, weight, fgradInput, kW, kH);
}
else
{
long T = input->size[0];
long t;
/* NOTE(review): refcounting disabled around the parallel loop,
 * presumably to keep newSelect/free from racing on the shared
 * storage refcounts — confirm with the TH version in use. */
THStorage_(clearFlag)(gradInput->storage, TH_STORAGE_REFCOUNTED);
THStorage_(clearFlag)(gradOutput->storage, TH_STORAGE_REFCOUNTED);
THStorage_(clearFlag)(fgradInput->storage, TH_STORAGE_REFCOUNTED);
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t, weight, fgradInput_t, kW, kH);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
THTensor_(free)(fgradInput_t);
}
THStorage_(setFlag)(gradInput->storage, TH_STORAGE_REFCOUNTED);
THStorage_(setFlag)(gradOutput->storage, TH_STORAGE_REFCOUNTED);
THStorage_(setFlag)(fgradInput->storage, TH_STORAGE_REFCOUNTED);
}
THTensor_(transpose)(weight, weight, 0, 1);
return 1;
}
/* Parameter-gradient accumulation for one frame:
 *   gradWeight += scale * gradOutput2d * finput^T
 *   gradBias[i] += scale * sum(gradOutput plane i)
 * finput is transposed in place for the matrix multiply and restored.
 * (An unused gradOutputPlane = THTensor_(new)() / free pair was removed.) */
static void nn_(SpatialConvolutionMM_accGradParameters_frame)(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput,
real scale)
{
long i;
/* 2D view sharing gradOutput's storage: planes x (oh*ow). */
THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2], -1);
THTensor_(transpose)(finput, finput, 0, 1);
THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, finput);
THTensor_(transpose)(finput, finput, 0, 1);
for(i = 0; i < gradBias->size[0]; i++)
{
long k;
real sum = 0;
real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
for(k = 0; k < gradOutput2d->size[1]; k++)
sum += data[k];
(gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum;
}
THTensor_(free)(gradOutput2d);
}
/* Lua entry point: accumulate weight/bias gradients, optionally scaled
 * (4th argument, default 1).  The batch loop is sequential because every
 * frame accumulates into the same gradWeight/gradBias tensors. */
static int nn_(SpatialConvolutionMM_accGradParameters)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
real scale = luaL_optnumber(L, 4, 1);
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);
THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );
if(input->nDimension == 3)
{
nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
}
else
{
long T = input->size[0];
long t;
for(t = 0; t < T; t++)
{
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
}
}
return 0;
}
/* Lua method table for this module; registered under the "nn" field of
 * the tensor metatable by the init function below. */
static const struct luaL_Reg nn_(SpatialConvolutionMM__) [] = {
{"SpatialConvolutionMM_updateOutput", nn_(SpatialConvolutionMM_updateOutput)},
{"SpatialConvolutionMM_updateGradInput", nn_(SpatialConvolutionMM_updateGradInput)},
{"SpatialConvolutionMM_accGradParameters", nn_(SpatialConvolutionMM_accGradParameters)},
{NULL, NULL}
};
/* Register the method table on the torch tensor metatable. */
static void nn_(SpatialConvolutionMM_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(SpatialConvolutionMM__), "nn");
lua_pop(L,1);
}
#endif
|
gi_regular_grid_trilinear_function_uncached.h | /*
*
* Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu>
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#ifndef REGULAR_GRID_TRILINEAR_FUNCTION_UNCACHED
#define REGULAR_GRID_TRILINEAR_FUNCTION_UNCACHED
#include <algorithm>
#include <cmath>
#include "base/gi_basic_types.h"
#include "base/gi_vectors.h"
#include "base/gi_regular_grid_3d.h"
namespace GInt {
class UncachedRegularGridTrilinearFunction {
protected:
RegularGrid3D * m_grid;
FLOATTYPE* m_image;
FLOATTYPE m_min_value;
FLOATTYPE m_max_value;
bool m_i_made_gradient;
bool m_i_made_image;
// Scan the whole image once to record its global min and max values in
// m_min_value / m_max_value.  Each OpenMP thread accumulates a private
// min/max (firstprivate copies seeded from m_image[0]) and merges it
// into the shared extremes inside the critical section.
void fill_extents() {
FLOATTYPE t_max_val = m_max_value = m_image[0];
FLOATTYPE t_min_val = m_min_value = m_image[0];
INDEX_TYPE num_elements = m_grid->NumElements();
INDEX_TYPE ii;
#pragma omp parallel shared(num_elements) private(ii) firstprivate(t_max_val,t_min_val)
{
#pragma omp for nowait
for (ii = 0; ii<num_elements; ++ii)
{
if (m_image[ii] > t_max_val)
{
t_max_val = m_image[ii];
}
if (m_image[ii] < t_min_val)
{
t_min_val = m_image[ii];
}
}
// Merge this thread's partial extremes into the member fields.
#pragma omp critical
{
if (t_max_val > m_max_value) m_max_value = t_max_val;
if (t_min_val < m_min_value) m_min_value = t_min_val;
}
}
}
public:
// Extremes computed by fill_extents(); undefined before it runs.
FLOATTYPE GetMinValue() const { return m_min_value; }
FLOATTYPE GetMaxValue() const { return m_max_value; }
// Wraps an existing scalar field over the given grid.  NOTE(review):
// despite the original comment, no memory is allocated when image is 0 —
// m_image stays NULL and must be set before any Sample* call.
UncachedRegularGridTrilinearFunction(RegularGrid3D* grid, FLOATTYPE *image = 0) : m_grid(grid) {
m_i_made_image = false;
m_i_made_gradient = false;
m_image = NULL;
// use the function if it is passed, otherwise simply allocate memory
if(image != 0) { m_image = image; }
//m_grad = new Vec3d[m_grid->NumElements()];
}
// Frees the image only if this object allocated it (m_i_made_image);
// as written, m_i_made_image is never set true in this class.
~UncachedRegularGridTrilinearFunction() {
if (m_i_made_image) delete[] m_image;
}
// return pointer to underlying mesh and function
const RegularGrid3D* GetGrid() const { return m_grid; }
FLOATTYPE* GetImage() const { return m_image; }
// sample the image at integral location
FLOATTYPE SampleImage(const Vec3l& p) const {
return m_image[m_grid->Index3d(p)];
}
// sample the image at integral location
FLOATTYPE SampleImage(const INDEX_TYPE id) const {
return m_image[id];
}
// Finite-difference stencil coefficients, indexed by order level then tap.
static const FLOATTYPE kRKCoefficients[5][9];
// Central-difference gradient at grid point p, using up to `rklevel`
// neighbors per side along each axis; Gather1DNeighborhood returns the
// level actually achievable near the boundary, which selects the stencil.
Vec3d GradientFromImage(const Vec3l& p, int rklevel) const {
Vec3l negs[9]; // don't support more than 4th order - cmon. would be ridiculous
double res_x = 0.0;
int rklevel_x = m_grid->Gather1DNeighborhood(p, 0, rklevel, negs);
int nume_x = rklevel_x * 2 + 1; // number of entries to average
for (int i = 0; i < nume_x; i++) {
res_x += kRKCoefficients[rklevel_x][i] * SampleImage(negs[i]);
}
double res_y = 0.0;
int rklevel_y = m_grid->Gather1DNeighborhood(p, 1, rklevel, negs);
int nume_y = rklevel_y * 2 + 1; // number of entries to average
for (int i = 0; i < nume_y; i++) {
res_y += kRKCoefficients[rklevel_y][i] * SampleImage(negs[i]);
}
double res_z = 0.0;
int rklevel_z = m_grid->Gather1DNeighborhood(p, 2, rklevel, negs);
int nume_z = rklevel_z * 2 + 1; // number of entries to average
for (int i = 0; i < nume_z; i++) {
res_z += kRKCoefficients[rklevel_z][i] * SampleImage(negs[i]);
}
return Vec3d(res_x, res_y, res_z);
}
// Sample the gradient at an integral location.
// Fixed: the original returned `const Vec3d&` bound to the temporary
// produced by GradientFromImage(), i.e. a dangling reference (UB as soon
// as the full expression ends). Returning by value is safe and remains
// source-compatible with every caller (they all copy or pass by const-ref).
Vec3d SampleGrad(const Vec3l& p) const {
    return GradientFromImage(p, 1);
}
// Trilinearly interpolate the scalar field at continuous position s.
// The 8 surrounding cell corners come from GatherSurrounding (corners may
// repeat at the domain boundary); interpolation collapses x, then y, then z.
FLOATTYPE TriLinInterpValue(const Vec3d& s) const {
    Vec3l corners[8]; // cell vertices around s
    m_grid->GatherSurrounding(s, corners);
    Vec3d base = corners[0];
    Vec3d t = s - base; // fractional offsets within the cell
    FLOATTYPE ex0 = (1 - t[0]) * SampleImage(corners[0]) + SampleImage(corners[1]) * t[0];
    FLOATTYPE ex1 = (1 - t[0]) * SampleImage(corners[2]) + SampleImage(corners[3]) * t[0];
    FLOATTYPE ex2 = (1 - t[0]) * SampleImage(corners[4]) + SampleImage(corners[5]) * t[0];
    FLOATTYPE ex3 = (1 - t[0]) * SampleImage(corners[6]) + SampleImage(corners[7]) * t[0];
    FLOATTYPE ey0 = (1 - t[1]) * ex0 + ex1 * t[1];
    FLOATTYPE ey1 = (1 - t[1]) * ex2 + ex3 * t[1];
    return (1 - t[2]) * ey0 + ey1 * t[2];
}
// Trilinearly interpolate the finite-difference gradient at position s,
// sampling the gradient at each of the 8 surrounding cell corners.
Vec3d TriLinInterpGrad(const Vec3d& s) const {
    Vec3l corners[8]; // cell vertices around s (may repeat at boundary)
    m_grid->GatherSurrounding(s, corners);
    Vec3d base = corners[0];
    Vec3d t = s - base; // fractional offsets within the cell
    // collapse along x...
    Vec3d gx0 = Vec3d::Lerp(SampleGrad(corners[0]), SampleGrad(corners[1]), t[0]);
    Vec3d gx1 = Vec3d::Lerp(SampleGrad(corners[2]), SampleGrad(corners[3]), t[0]);
    Vec3d gx2 = Vec3d::Lerp(SampleGrad(corners[4]), SampleGrad(corners[5]), t[0]);
    Vec3d gx3 = Vec3d::Lerp(SampleGrad(corners[6]), SampleGrad(corners[7]), t[0]);
    // ...then y, then z
    Vec3d gy0 = Vec3d::Lerp(gx0, gx1, t[1]);
    Vec3d gy1 = Vec3d::Lerp(gx2, gx3, t[1]);
    return Vec3d::Lerp(gy0, gy1, t[2]);
}
// Intentionally a no-op: gradient caching (m_grad) is disabled in this
// "uncached" variant; the method is kept for interface compatibility.
void SetGradExplicit(INDEX_TYPE id, Vec3d vec) {
//this->m_grad[id] = vec;
}
// Fill vals with the gradient at each of the 8 grid vertices surrounding
// continuous sample point s (vertices may repeat at the domain boundary).
void GetGradSurrounding(const Vec3d& s, Vec3d* vals) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurrounding(s, n);
for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]);
}
// Integral-coordinate overload of GetGradSurrounding.
void GetGradSurrounding(const Vec3l& s, Vec3d* vals) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurrounding(s, n);
for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]);
}
// Same as GetGradSurrounding but skips boundary clamping.
// Use with extreme care - no boundary checks, only call on strictly
// interior points (out-of-range access otherwise).
void GetGradSurroundingNoBoundaryCheck(const Vec3d& s, Vec3d* vals) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurroundingNoBoundaryCheck(s, n);
for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]);
}
// Interpolation entry points; currently both forward to the trilinear
// implementations.
FLOATTYPE InterpolatedValue(const Vec3d& s) const {
return TriLinInterpValue(s);
}
Vec3d InterpolatedGrad(const Vec3d& s) const {
return TriLinInterpGrad(s);
}
// Trilinear gradient interpolation that reuses pre-gathered corner
// gradients: vals must hold the 8 gradients around s and int_base the
// cell's lower corner (see GetGradSurrounding).
Vec3d TriLinInterpGrad(const Vec3d& s, const Vec3l& int_base, Vec3d* vals) const {
    Vec3d t = s - int_base; // fractional offsets within the cell
    Vec3d gx0 = Vec3d::Lerp(vals[0], vals[1], t[0]);
    Vec3d gx1 = Vec3d::Lerp(vals[2], vals[3], t[0]);
    Vec3d gx2 = Vec3d::Lerp(vals[4], vals[5], t[0]);
    Vec3d gx3 = Vec3d::Lerp(vals[6], vals[7], t[0]);
    Vec3d gy0 = Vec3d::Lerp(gx0, gx1, t[1]);
    Vec3d gy1 = Vec3d::Lerp(gx2, gx3, t[1]);
    return Vec3d::Lerp(gy0, gy1, t[2]);
}
void LoadImageFromFloatFile(const char* fname) {
size_t image_size = m_grid->NumElements();
// fill in image
m_image = new FLOATTYPE[image_size]; m_i_made_image = true;
FILE* fin = fopen(fname, "rb");
for (INDEX_TYPE i = 0; i < image_size; i++) {
float tval = 0;
fread(&tval, sizeof(float), 1, fin);
m_image[i] = tval;
}
fclose(fin);
fill_extents();
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
void LoadImageFromFile(const char* fname) {
size_t image_size = m_grid->NumElements();
// fill in image
m_image = new FLOATTYPE[image_size]; m_i_made_image = true;
FILE* fin = fopen(fname, "rb");
fread(m_image, sizeof(FLOATTYPE), image_size, fin);
fclose(fin);
fill_extents();
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
// Adopt an external image buffer without copying; ownership stays with the
// caller. Fixed: the original left m_i_made_image untouched, so if a
// previous load had set it, the destructor would delete[] this caller-owned
// buffer (and the previously owned buffer leaked either way — that leak is
// pre-existing and unchanged here). Also removed an unused local.
void ShallowCopyImage(FLOATTYPE *image) {
    m_image = image;
    m_i_made_image = false; // we do not own this buffer
    fill_extents();
    printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
// Copy an external image buffer into newly allocated, owned storage and
// recompute the cached extrema.
void DeepCopyImage(const FLOATTYPE *image) {
    INDEX_TYPE image_size = m_grid->NumElements();
    m_image = new FLOATTYPE[image_size];
    m_i_made_image = true; // destructor will free our copy
    memcpy(m_image, image, image_size * sizeof(FLOATTYPE));
    fill_extents();
    printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
// Strict total order on samples: compare by value, breaking ties (equal
// values) by index so distinct cells never compare equal
// (simulation-of-simplicity style tie-breaking).
inline bool IsGreater(INDEX_TYPE a, INDEX_TYPE b) const {
    if (m_image[a] > m_image[b]) {
        return true;
    }
    if (m_image[b] > m_image[a]) {
        return false;
    }
    return a > b; // values tie: fall back to index order
}
//Vec3d IStep(const Vec3d& p, const Vec3d& grad, const FLOATTYPE h) const {
// return m_grid->Inbounds(p + (grad * h));
//}
//Vec3d IStepNoBoundaryCheck(const Vec3d& p, const Vec3d& grad, const FLOATTYPE h) const {
// return p + (grad * h);
//}
// add in block structure
// Intentionally empty in this uncached variant: gradients are computed on
// demand by GradientFromImage() rather than precomputed and stored.
void ComputeGradFromImage(int rklevel) {
}
void Negate() {
#pragma omp parallel for schedule(static)
for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) {
this->m_image[i] *= -1;
}
}
};
};
#endif
|
utils.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <utils.h>
/*
 * Print an n-by-n matrix to stdout, one row per line, prefixed by the
 * row's storage address.
 * Fixed: the original passed the row pointer to printf with "%X", which is
 * undefined behavior (wrong argument type for the conversion); "%p" with a
 * (void *) cast is the portable way to print a pointer.
 */
void print_mat(int n, int** mat) {
    for (int i = 0; i < n; i++) {
        printf("%p: ", (void *)mat[i]);
        for (int j = 0; j < n; j++) {
            printf("%d ", mat[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}
/* Release an n-row matrix whose rows were allocated individually. */
void free_mat(int n, int** mat) {
    for (int row = 0; row < n; row++) {
        free(mat[row]);
    }
    free(mat);
}
int** make_rand_mat(int n, int max_val) {
double begin,end;
int i,j;
int** mat = malloc(sizeof(int*)*n);
begin = omp_get_wtime();
srand(time(NULL)); // generate rand seed from current time
#pragma omp parallel for private(i,j) firstprivate (n)
for (i = 0; i < n; i++) {
mat[i] = malloc(sizeof(int)*n);
#pragma omp parallel for private(j)
for (j = 0; j < n; j++) {
mat[i][j] = rand() % max_val;
}
}
end = omp_get_wtime();
printf("matrix initialization with random numbers took %lf seconds\n", end - begin);
return mat;
}
int* make_rand_mat_1d(int n, int max_val) {
double begin,end;
int* mat = malloc(n*n*sizeof(int));
int i;
begin = omp_get_wtime();
srand(time(NULL)); // generate rand seed from current time
#pragma omp parallel for private(i) firstprivate (n)
for (i = 0; i < n*n; i++) {
mat[i] = rand() % max_val;
}
end = omp_get_wtime();
printf("matrix initialization with random numbers took %lf seconds\n", end - begin);
return mat;
}
int** make_zero_mat(int n) {
double begin,end;
int i,j;
int** mat = malloc (sizeof(int*)*n);
begin = omp_get_wtime();
srand(time(NULL)); // generate rand seed from current time
#pragma omp parallel for private(i,j) firstprivate (n)
for (i = 0; i < n; i++) {
mat[i] = calloc(n, sizeof(int));
}
end = omp_get_wtime();
printf("matrix initialization with zeros took %lf seconds\n", end - begin);
return mat;
}
int* make_zero_mat_1d(int n) {
double begin,end;
int i;
int* mat = malloc (n*n*sizeof(int*));
begin = omp_get_wtime();
srand(time(NULL)); // generate rand seed from current time
mat = calloc(n*n, sizeof(int));
end = omp_get_wtime();
printf("matrix initialization with zeros took %lf seconds\n", end - begin);
return mat;
}
/*
 * Compare two n-by-n matrices element-wise.
 * Returns 0 when identical; otherwise returns 1 and stores the coordinates
 * of the first mismatch (row-major scan order) in *bad_i / *bad_j.
 * On a match, *bad_i and *bad_j are left untouched.
 */
int compare_pat(int n, int* bad_i, int* bad_j, int** mat1, int** mat2) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            if (mat1[row][col] != mat2[row][col]) {
                *bad_i = row;
                *bad_j = col;
                return 1;
            }
        }
    }
    return 0;
}
|
GB_unaryop__abs_fp64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_int8
// op(A') function: GB_tran__abs_fp64_int8
// C type: double
// A type: int8_t
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the ABS unary operator entrywise,
// casting int8_t inputs to double outputs: Cx [p] = fabs ((double) Ax [p]).
// Auto-generated; the per-entry work is expanded from the GB_CAST_OP macro.
GrB_Info GB_unop__abs_fp64_int8
(
double *restrict Cx,        // output array, anz entries
const int8_t *restrict Ax,  // input array, anz entries
int64_t anz,                // number of entries
int nthreads                // OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// entries are independent, so a static schedule partitions them evenly
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast int8_t -> double, and apply ABS.
// Auto-generated; the loop body lives in the textually-included template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__abs_fp64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,   // per-slice row counts from phase 1
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice                     // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vect-simd-clone-4.c | /* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#ifndef N
#define N 1024
#endif
float d[N];
int e[N];
unsigned short f[N];
/* SIMD-clone target for the vectorizer test: simdlen(8), b is uniform,
   notinbranch. noinline keeps the scalar body from being inlined so the
   omp simd loop in bar() must call the declared simd clone.
   Returns 5.0f for a < 30, otherwise a + b + c (checked in main). */
#pragma omp declare simd simdlen(8) notinbranch uniform(b)
__attribute__((noinline)) float
foo (float a, float b, float c)
{
if (a < 30)
return 5.0f;
return a + b + c;
}
/* Driver loop the test expects to be vectorized: fills d via foo's simd
   clone and bumps e (stays 0: globals are zero-initialized) and f. */
__attribute__((noinline, noclone)) void
bar ()
{
int i;
#pragma omp simd
for (i = 0; i < N; ++i)
{
d[i] = foo (i, 123, i * 3);
e[i] = e[i] * 3;
f[i] = f[i] + 1;
}
}
/* Verify bar()'s results: d[i] = foo(i,123,3i) = (i<30 ? 5 : 4i+123),
   e[i] must remain 0, f[i] must be exactly 1. */
int
main ()
{
int i;
check_vect ();
bar ();
for (i = 0; i < N; i++)
if (d[i] != (i < 30 ? 5.0f : i * 4 + 123.0f) || e[i] || f[i] != 1)
abort ();
return 0;
}
|
streaming_rrr_generator.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright 2018 Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_STREAMING_RRR_GENERATOR_H
#define RIPPLES_STREAMING_RRR_GENERATOR_H
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdlib>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <vector>
#include "omp.h"
#include "spdlog/sinks/stdout_color_sinks.h"
#include "spdlog/spdlog.h"
#include "trng/uniform_int_dist.hpp"
#include "ripples/imm_execution_record.h"
#ifdef RIPPLES_ENABLE_CUDA
#include "ripples/cuda/cuda_generate_rrr_sets.h"
#include "ripples/cuda/from_nvgraph/imm/bfs.hxx"
#endif
#if CUDA_PROFILE
#include <chrono>
#endif
namespace ripples {
// Validate streaming-worker counts and build the OpenMP-worker -> GPU-device
// mapping. Returns 0 on success, -1 on invalid input (logged to "console").
// With CUDA disabled, only asserts that no GPU workers were requested.
int streaming_command_line(std::unordered_map<size_t, size_t> &worker_to_gpu,
size_t streaming_workers,
size_t streaming_gpu_workers,
std::string gpu_mapping_string) {
auto console = spdlog::get("console");
if (!(streaming_workers > 0 && streaming_gpu_workers <= streaming_workers)) {
console->error("invalid number of streaming workers");
return -1;
}
#ifdef RIPPLES_ENABLE_CUDA
auto num_gpus = cuda_num_devices();
if (!gpu_mapping_string.empty()) {
// explicit mapping: comma-separated list of OpenMP worker ids, assigned
// to GPU devices round-robin in the order they appear
size_t gpu_id = 0;
std::istringstream iss(gpu_mapping_string);
std::string token;
while (worker_to_gpu.size() < streaming_gpu_workers &&
std::getline(iss, token, ',')) {
std::stringstream omp_num_ss(token);
size_t omp_num;
omp_num_ss >> omp_num;
if (!(omp_num < streaming_workers)) {
console->error("invalid worker in worker-to-GPU mapping: {}", omp_num);
return -1;
}
if (worker_to_gpu.find(omp_num) != worker_to_gpu.end()) {
console->error("duplicated worker-to-GPU mapping: {}", omp_num);
return -1;
}
worker_to_gpu[omp_num] = gpu_id++;
if (gpu_id == num_gpus) gpu_id = 0;  // wrap around the devices
}
if (worker_to_gpu.size() < streaming_gpu_workers) {
console->error("GPU mapping string is too short");
return -1;
}
} else {
// by default, map GPU workers after CPU workers
size_t gpu_id = 0;
size_t omp_num = streaming_workers - streaming_gpu_workers;
for (; omp_num < streaming_workers; ++omp_num) {
worker_to_gpu[omp_num] = gpu_id++;
if (gpu_id == num_gpus) gpu_id = 0;
}
}
#else   // RIPPLES_ENABLE_CUDA
assert(streaming_gpu_workers == 0);
#endif  // RIPPLES_ENABLE_CUDA
return 0;
}
// Abstract base for the streaming workers. Each worker repeatedly claims a
// batch of output slots in [begin, end) through the shared atomic counter
// mpmc_head and fills them with random reverse-reachable sets (see derived
// CPU/GPU implementations).
template <typename GraphTy, typename ItrTy>
class WalkWorker {
using vertex_t = typename GraphTy::vertex_type;
public:
WalkWorker(const GraphTy &G) : G_(G) {}
virtual ~WalkWorker() {}
// Run until the shared head counter passes the end of the output range.
virtual void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin,
ItrTy end) = 0;
protected:
const GraphTy &G_;  // the input graph (shared, read-only)
#if CUDA_PROFILE
public:
// Per-iteration profiling hooks (only compiled with CUDA_PROFILE).
virtual void begin_prof_iter() = 0;
virtual void prof_record(typename IMMExecutionRecord::walk_iteration_prof &,
size_t) = 0;
#endif
};
// CPU worker: claims fixed-size batches of RRR-set slots from the shared
// atomic counter and fills each slot by sampling a root uniformly at random
// and running AddRRRSet under the given diffusion model.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy,
typename diff_model_tag>
class CPUWalkWorker : public WalkWorker<GraphTy, ItrTy> {
using vertex_t = typename GraphTy::vertex_type;
public:
CPUWalkWorker(const GraphTy &G, const PRNGeneratorTy &rng)
: WalkWorker<GraphTy, ItrTy>(G), rng_(rng), u_(0, G.num_nodes()) {}
// Claim batches until the head counter passes the end of the range.
void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin, ItrTy end) {
size_t offset = 0;
while ((offset = mpmc_head.fetch_add(batch_size_)) <
std::distance(begin, end)) {
auto first = begin;
std::advance(first, offset);
auto last = first;
std::advance(last, batch_size_);
if (last > end) last = end;  // clamp the final, partial batch
batch(first, last);
}
}
private:
static constexpr size_t batch_size_ = 32;  // slots claimed per fetch_add
PRNGeneratorTy rng_;
trng::uniform_int_dist u_;  // uniform root sampler over [0, num_nodes)
// Fill [first, last) with freshly sampled RRR sets.
void batch(ItrTy first, ItrTy last) {
#if CUDA_PROFILE
auto start = std::chrono::high_resolution_clock::now();
#endif
auto size = std::distance(first, last);
// work on local copies of the RNG state, then write them back
auto local_rng = rng_;
auto local_u = u_;
for (;first != last; ++first) {
vertex_t root = local_u(local_rng);
AddRRRSet(this->G_, root, local_rng, *first, diff_model_tag{});
}
rng_ = local_rng;
u_ = local_u;
#if CUDA_PROFILE
auto &p(prof_bd.back());
p.d_ += std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::high_resolution_clock::now() - start);
p.n_ += size;
#endif
}
#if CUDA_PROFILE
public:
// Per-iteration counters: number of sets produced and time spent.
struct iter_profile_t {
size_t n_{0};
std::chrono::nanoseconds d_{0};
};
using profile_t = std::vector<iter_profile_t>;
profile_t prof_bd;
void begin_prof_iter() { prof_bd.emplace_back(); }
void print_prof_iter(size_t i) {
auto console = spdlog::get("console");
assert(i < prof_bd.size());
auto &p(prof_bd[i]);
if (p.n_)
console->info(
"n-sets={}\tns={}\tb={}", p.n_, p.d_.count(),
(float)p.n_ * 1e03 /
std::chrono::duration_cast<std::chrono::milliseconds>(p.d_)
.count());
else
console->info("> idle worker");
}
// Append this worker's iteration-i stats to the execution record.
void prof_record(typename IMMExecutionRecord::walk_iteration_prof &r,
size_t i) {
assert(i < prof_bd.size());
typename IMMExecutionRecord::cpu_walk_prof res;
auto &p(prof_bd[i]);
res.NumSets = p.n_;
res.Total = std::chrono::duration_cast<decltype(res.Total)>(p.d_);
r.CPUWalks.push_back(res);
}
#endif
};
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy,
typename diff_model_tag>
class GPUWalkWorker;
#ifdef RIPPLES_ENABLE_CUDA
// GPU worker for the Linear-Threshold model: launches a CUDA kernel that
// performs many walks in parallel, copies back fixed-width result masks, and
// rebuilds RRR sets on the host. Walks that exceed the mask width are
// re-done on the CPU.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy>
class GPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, linear_threshold_tag>
: public WalkWorker<GraphTy, ItrTy> {
using vertex_t = typename GraphTy::vertex_type;
public:
// Kernel launch geometry and result-mask layout.
struct config_t {
config_t(size_t) {
auto console = spdlog::get("console");
assert(num_threads_ % block_size_ == 0);
max_blocks_ = num_threads_ / block_size_;
#if CUDA_PROFILE
console->info(
"> [GPUWalkWorkerLT::config_t] "
"block_size_={}\tnum_threads_={}\tmax_blocks_={}",
block_size_, num_threads_, max_blocks_);
#endif
}
size_t num_gpu_threads() const { return num_threads_; }
// configuration parameters
static constexpr size_t block_size_ = 256;
static constexpr size_t num_threads_ = 1 << 15;
const size_t mask_words_ = 8;  // maximum walk size
// inferred configuration
size_t max_blocks_{0};
};
GPUWalkWorker(const config_t &conf, const GraphTy &G,
const PRNGeneratorTy &rng,
std::shared_ptr<cuda_ctx<GraphTy>> ctx)
: WalkWorker<GraphTy, ItrTy>(G),
conf_(conf),
rng_(rng),
u_(0, G.num_nodes()),
cuda_ctx_(ctx) {
cuda_set_device(ctx->gpu_id);
cuda_stream_create(&cuda_stream_);
// allocate host/device memory for one kernel launch worth of masks
auto mask_size = conf.mask_words_ * sizeof(mask_word_t);
lt_res_mask_ = (mask_word_t *)malloc(conf_.num_gpu_threads() * mask_size);
cuda_malloc((void **)&d_lt_res_mask_, conf_.num_gpu_threads() * mask_size);
// allocate device-side RNGs (one per GPU thread)
cuda_malloc((void **)&d_trng_state_,
conf_.num_gpu_threads() * sizeof(PRNGeneratorTy));
}
~GPUWalkWorker() {
cuda_set_device(cuda_ctx_->gpu_id);
cuda_stream_destroy(cuda_stream_);
// free host/device memory
free(lt_res_mask_);
cuda_free(d_lt_res_mask_);
cuda_free(d_trng_state_);
}
// Split the master RNG into per-GPU-thread streams starting at first_seq.
void rng_setup(const PRNGeneratorTy &master_rng, size_t num_seqs,
size_t first_seq) {
cuda_set_device(cuda_ctx_->gpu_id);
cuda_lt_rng_setup(d_trng_state_, master_rng, num_seqs, first_seq,
conf_.max_blocks_, conf_.block_size_);
}
// Claim batches (one kernel launch each) until the output range is consumed.
void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin, ItrTy end) {
cuda_set_device(cuda_ctx_->gpu_id);
size_t offset = 0;
auto batch_size = conf_.num_gpu_threads();
while ((offset = mpmc_head.fetch_add(batch_size)) <
std::distance(begin, end)) {
auto first = begin;
std::advance(first, offset);
auto last = first;
std::advance(last, batch_size);
if (last > end) last = end;
batch(first, last);
}
}
private:
config_t conf_;
PRNGeneratorTy rng_;  // host-side RNG, used only for exceeding-walk redo
trng::uniform_int_dist u_;
cudaStream_t cuda_stream_;
std::shared_ptr<cuda_ctx<GraphTy>> cuda_ctx_;
// memory buffers: host and device result masks, one row per GPU thread
mask_word_t *lt_res_mask_, *d_lt_res_mask_;
PRNGeneratorTy *d_trng_state_;
// One kernel launch: walk on device, copy masks back, rebuild sets on host.
void batch(ItrTy first, ItrTy last) {
#if CUDA_PROFILE
auto &p(prof_bd.back());
auto start = std::chrono::high_resolution_clock::now();
#endif
auto size = std::distance(first, last);
cuda_lt_kernel(conf_.max_blocks_, conf_.block_size_, size,
this->G_.num_nodes(), d_trng_state_, d_lt_res_mask_,
conf_.mask_words_, cuda_ctx_.get(), cuda_stream_);
#if CUDA_PROFILE
cuda_sync(cuda_stream_);
auto t1 = std::chrono::high_resolution_clock::now();
p.dwalk_ +=
std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - start);
auto t0 = t1;
#endif
cuda_d2h(lt_res_mask_, d_lt_res_mask_,
size * conf_.mask_words_ * sizeof(mask_word_t), cuda_stream_);
cuda_sync(cuda_stream_);
#if CUDA_PROFILE
t1 = std::chrono::high_resolution_clock::now();
p.dd2h_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
t0 = t1;
#endif
batch_lt_build(first, size);
#if CUDA_PROFILE
t1 = std::chrono::high_resolution_clock::now();
p.dbuild_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
#endif
#if CUDA_PROFILE
p.d_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - start);
p.n_ += size;
#endif
}
// Decode each mask row into an RRR set. A leading sentinel equal to
// num_nodes marks a walk that overflowed the mask; those are redone on
// the host with the root stored in word 1.
void batch_lt_build(ItrTy first, size_t batch_size) {
#if CUDA_PROFILE
auto &p(prof_bd.back());
#endif
for (size_t i = 0; i < batch_size; ++i, ++first) {
auto &rrr_set(*first);
rrr_set.reserve(conf_.mask_words_);
auto res_mask = lt_res_mask_ + (i * conf_.mask_words_);
if (res_mask[0] != this->G_.num_nodes()) {
// valid walk: copy vertices until the num_nodes terminator
for (size_t j = 0;
j < conf_.mask_words_ && res_mask[j] != this->G_.num_nodes();
++j) {
rrr_set.push_back(res_mask[j]);
}
} else {
// invalid walk: exceeded the mask; redo sequentially on the host
#if CUDA_PROFILE
p.num_exceedings_++;
#endif
auto root = res_mask[1];
AddRRRSet(this->G_, root, rng_, rrr_set,
ripples::linear_threshold_tag{});
}
std::stable_sort(rrr_set.begin(), rrr_set.end());
}
}
#if CUDA_PROFILE
// Per-iteration counters: set count, overflow count, and phase timings.
struct iter_profile_t {
size_t n_{0}, num_exceedings_{0};
std::chrono::nanoseconds d_{0}, dwalk_{0}, dd2h_{0}, dbuild_{0};
};
using profile_t = std::vector<iter_profile_t>;
profile_t prof_bd;
public:
void begin_prof_iter() { prof_bd.emplace_back(); }
void print_prof_iter(size_t i) {
auto console = spdlog::get("console");
assert(i < prof_bd.size());
auto &p(prof_bd[i]);
if (p.n_) {
console->info(
"n-sets={}\tn-exc={}\tns={}\tb={}", p.n_, p.num_exceedings_,
p.d_.count(),
(float)p.n_ * 1e03 /
std::chrono::duration_cast<std::chrono::milliseconds>(p.d_)
.count());
console->info("walk={}\td2h={}\tbuild={}", p.dwalk_.count(),
p.dd2h_.count(), p.dbuild_.count());
console->info("n. exceedings={} (/{}={})", p.num_exceedings_, p.n_,
(float)p.num_exceedings_ / p.n_);
} else
console->info("> idle worker");
}
// Append this worker's iteration-i stats to the execution record.
void prof_record(typename IMMExecutionRecord::walk_iteration_prof &r,
size_t i) {
assert(i < prof_bd.size());
typename IMMExecutionRecord::gpu_walk_prof res;
auto &p(prof_bd[i]);
res.NumSets = p.n_;
res.Total = std::chrono::duration_cast<decltype(res.Total)>(p.d_);
res.Kernel = std::chrono::duration_cast<decltype(res.Kernel)>(p.dwalk_);
res.D2H = std::chrono::duration_cast<decltype(res.D2H)>(p.dd2h_);
res.Post = std::chrono::duration_cast<decltype(res.Post)>(p.dbuild_);
r.GPUWalks.push_back(res);
}
#endif
};
// GPU worker for the Independent-Cascade model: drives an nvgraph-based BFS
// per sampled root, copies the predecessor array back to the host, and
// collects every reached vertex into the RRR set.
// Fixed: solver_->traverse() was called with `reinterpret_cast<int>(root)`;
// reinterpret_cast cannot perform integral conversions (ill-formed unless
// vertex_t is exactly int), and a value conversion is what is intended —
// use static_cast instead.
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy>
class GPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, independent_cascade_tag>
    : public WalkWorker<GraphTy, ItrTy> {
  using vertex_t = typename GraphTy::vertex_type;
  using bfs_solver_t = nvgraph::Bfs<int, PRNGeneratorTy>;

 public:
  // Launch geometry: the per-worker block budget divides the device budget.
  struct config_t {
    config_t(size_t num_workers)
        : block_size_(bfs_solver_t::traverse_block_size()),
          max_blocks_(num_workers ? cuda_max_blocks() / num_workers : 0) {
      auto console = spdlog::get("console");
      console->info(
          "> [GPUWalkWorkerIC::config_t] "
          "max_blocks_={}\tblock_size_={}",
          max_blocks_, block_size_);
    }
    size_t num_gpu_threads() const { return max_blocks_ * block_size_; }
    const size_t max_blocks_;
    const size_t block_size_;
  };

  GPUWalkWorker(const config_t &conf, const GraphTy &G,
                const PRNGeneratorTy &rng,
                std::shared_ptr<cuda_ctx<GraphTy>> ctx)
      : WalkWorker<GraphTy, ItrTy>(G),
        conf_(conf),
        rng_(rng),
        u_(0, G.num_nodes()),
        cuda_ctx_(ctx) {
    cuda_set_device(ctx->gpu_id);
    cuda_stream_create(&cuda_stream_);
    // allocate host/device predecessor buffers (one slot per graph vertex)
    // NOTE(review): the (int *) cast assumes vertex_t is int — matches the
    // bfs_solver_t instantiation above, but verify if vertex_t ever changes.
    ic_predecessors_ = (int *)malloc(
        G.num_nodes() * sizeof(typename cuda_device_graph<GraphTy>::vertex_t));
    cuda_malloc(
        (void **)&d_ic_predecessors_,
        G.num_nodes() * sizeof(typename cuda_device_graph<GraphTy>::vertex_t));
    // allocate device-side RNGs
    cuda_malloc((void **)&d_trng_state_,
                conf_.num_gpu_threads() * sizeof(PRNGeneratorTy));
    // create the solver
    solver_ = new bfs_solver_t(
        this->G_.num_nodes(), this->G_.num_edges(),
        cuda_graph_index(cuda_ctx_.get()), cuda_graph_edges(cuda_ctx_.get()),
        cuda_graph_weights(cuda_ctx_.get()), true, TRAVERSAL_DEFAULT_ALPHA,
        TRAVERSAL_DEFAULT_BETA, conf_.max_blocks_, cuda_stream_);
    solver_->configure(nullptr, d_ic_predecessors_, nullptr);
  }

  ~GPUWalkWorker() {
    cuda_set_device(cuda_ctx_->gpu_id);
    delete solver_;
    cuda_stream_destroy(cuda_stream_);
    // free host/device memory
    free(ic_predecessors_);
    cuda_free(d_ic_predecessors_);
    cuda_free(d_trng_state_);
  }

  // Split the master RNG into per-GPU-thread streams starting at first_seq.
  void rng_setup(const PRNGeneratorTy &master_rng, size_t num_seqs,
                 size_t first_seq) {
    cuda_set_device(cuda_ctx_->gpu_id);
    cuda_ic_rng_setup(d_trng_state_, master_rng, num_seqs, first_seq,
                      conf_.max_blocks_, conf_.block_size_);
    solver_->rng(d_trng_state_);
  }

  // Claim batches until the shared head counter passes the output range.
  void svc_loop(std::atomic<size_t> &mpmc_head, ItrTy begin, ItrTy end) {
    // set device and stream
    cuda_set_device(cuda_ctx_->gpu_id);
    size_t offset = 0;
    while ((offset = mpmc_head.fetch_add(batch_size_)) <
           std::distance(begin, end)) {
      auto first = begin;
      std::advance(first, offset);
      auto last = first;
      std::advance(last, batch_size_);
      if (last > end) last = end;
      batch(first, last);
    }
  }

 private:
  static constexpr size_t batch_size_ = 32;  // slots claimed per fetch_add
  config_t conf_;
  PRNGeneratorTy rng_;
  trng::uniform_int_dist u_;  // uniform root sampler over [0, num_nodes)
  // CUDA context
  cudaStream_t cuda_stream_;
  std::shared_ptr<cuda_ctx<GraphTy>> cuda_ctx_;
  // nvgraph machinery
  bfs_solver_t *solver_;
  // memory buffers
  typename cuda_device_graph<GraphTy>::vertex_t *ic_predecessors_,
      *d_ic_predecessors_;
  PRNGeneratorTy *d_trng_state_;

  // One BFS per slot: traverse from a random root, copy predecessors back,
  // and decode them into the RRR set.
  void batch(ItrTy first, ItrTy last) {
#if CUDA_PROFILE
    auto &p(prof_bd.back());
    auto start = std::chrono::high_resolution_clock::now();
#endif
    auto size = std::distance(first, last);
    for (size_t wi = 0; wi < size; ++wi) {
#if CUDA_PROFILE
      auto t0 = std::chrono::high_resolution_clock::now();
#endif
      auto root = u_(rng_);
      // fixed: value conversion, not reinterpret_cast
      solver_->traverse(static_cast<int>(root));
#if CUDA_PROFILE
      cuda_sync(cuda_stream_);
      auto t1 = std::chrono::high_resolution_clock::now();
      p.dwalk_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
      t0 = t1;
#endif
      cuda_d2h(ic_predecessors_, d_ic_predecessors_,
               this->G_.num_nodes() *
                   sizeof(typename cuda_device_graph<GraphTy>::vertex_t),
               cuda_stream_);
      cuda_sync(cuda_stream_);
#if CUDA_PROFILE
      t1 = std::chrono::high_resolution_clock::now();
      p.dd2h_ += std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
      t0 = t1;
#endif
      ic_predecessors_[root] = root;  // mark the root as reached
      ic_build(first++);
#if CUDA_PROFILE
      t1 = std::chrono::high_resolution_clock::now();
      p.dbuild_ +=
          std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0);
#endif
    }
#if CUDA_PROFILE
    p.d_ += std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::high_resolution_clock::now() - start);
    p.n_ += size;
#endif
  }

  // Every vertex with a valid predecessor (!= -1) was reached by the BFS
  // and belongs to the RRR set.
  void ic_build(ItrTy dst) {
    auto &rrr_set(*dst);
    for (vertex_t i = 0; i < this->G_.num_nodes(); ++i)
      if (ic_predecessors_[i] != -1) rrr_set.push_back(i);
  }

#if CUDA_PROFILE
  // Per-iteration counters: set count and phase timings.
  struct iter_profile_t {
    size_t n_{0};
    std::chrono::nanoseconds d_{0}, dwalk_{0}, dd2h_{0}, dbuild_{0};
  };
  using profile_t = std::vector<iter_profile_t>;
  profile_t prof_bd;

 public:
  void begin_prof_iter() { prof_bd.emplace_back(); }
  void print_prof_iter(size_t i) {
    auto console = spdlog::get("console");
    assert(i < prof_bd.size());
    auto &p(prof_bd[i]);
    if (p.n_) {
      console->info(
          "n-sets={}\tns={}\tb={}", p.n_, p.d_.count(),
          (float)p.n_ * 1e03 /
              std::chrono::duration_cast<std::chrono::milliseconds>(p.d_)
                  .count());
      console->info("walk={}\td2h={}\tbuild={}", p.dwalk_.count(),
                    p.dd2h_.count(), p.dbuild_.count());
    } else
      console->info("> idle worker");
  }
  // Append this worker's iteration-i stats to the execution record.
  void prof_record(typename IMMExecutionRecord::walk_iteration_prof &r,
                   size_t i) {
    assert(i < prof_bd.size());
    typename IMMExecutionRecord::gpu_walk_prof res;
    auto &p(prof_bd[i]);
    res.NumSets = p.n_;
    res.Total = std::chrono::duration_cast<decltype(res.Total)>(p.d_);
    res.Kernel = std::chrono::duration_cast<decltype(res.Kernel)>(p.dwalk_);
    res.D2H = std::chrono::duration_cast<decltype(res.D2H)>(p.dd2h_);
    res.Post = std::chrono::duration_cast<decltype(res.Post)>(p.dbuild_);
    r.GPUWalks.push_back(res);
  }
#endif
};
#endif // RIPPLES_ENABLE_CUDA
template <typename GraphTy, typename PRNGeneratorTy, typename ItrTy,
typename diff_model_tag>
class StreamingRRRGenerator {
using vertex_t = typename GraphTy::vertex_type;
using worker_t = WalkWorker<GraphTy, ItrTy>;
using gpu_worker_t =
GPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, diff_model_tag>;
using cpu_worker_t =
CPUWalkWorker<GraphTy, PRNGeneratorTy, ItrTy, diff_model_tag>;
public:
// Build the worker pool: one worker per OpenMP thread, GPU workers for the
// threads listed in worker_to_gpu (CUDA builds only), CPU workers for the
// rest. The master RNG is split into disjoint sequences: one per CPU
// worker, then one host-side sequence per GPU worker, then one per GPU
// thread.
StreamingRRRGenerator(const GraphTy &G, const PRNGeneratorTy &master_rng,
IMMExecutionRecord &record, size_t num_cpu_workers,
size_t num_gpu_workers,
const std::unordered_map<size_t, size_t> &worker_to_gpu)
: num_cpu_workers_(num_cpu_workers),
num_gpu_workers_(num_gpu_workers),
record_(record),
console(spdlog::get("Streaming Generator")) {
if (!console) {
console = spdlog::stdout_color_st("Streaming Generator");
}
#ifdef RIPPLES_ENABLE_CUDA
// init GPU contexts (one per distinct device id in the mapping)
for (auto &m : worker_to_gpu) {
auto gpu_id = m.second;
if (cuda_contexts_.find(gpu_id) == cuda_contexts_.end()) {
cuda_contexts_[gpu_id] =
std::shared_ptr<cuda_ctx<GraphTy>>(cuda_make_ctx(G, gpu_id));
}
}
typename gpu_worker_t::config_t gpu_conf(num_gpu_workers_);
assert(gpu_conf.max_blocks_ * num_gpu_workers_ <= cuda_max_blocks());
auto num_gpu_threads_per_worker = gpu_conf.num_gpu_threads();
// sequences: CPU workers, then a host sequence per GPU worker, then the
// per-GPU-thread device sequences starting at gpu_seq_offset
auto num_rng_sequences =
num_cpu_workers_ + num_gpu_workers_ * (num_gpu_threads_per_worker + 1);
auto gpu_seq_offset = num_cpu_workers_ + num_gpu_workers_;
#else
assert(num_gpu_workers_ == 0);
size_t num_rng_sequences = num_cpu_workers_;
#endif
// console->info("CPU Workers {}", num_cpu_workers);
// console->info("GPU Workers {}", num_gpu_workers);
// translate user-mapping string into vector
size_t gpu_worker_id = 0;
size_t cpu_worker_id = 0;
for (size_t omp_num = 0; omp_num < num_cpu_workers + num_gpu_workers;
++omp_num) {
#ifdef RIPPLES_ENABLE_CUDA
if (worker_to_gpu.find(omp_num) != worker_to_gpu.end()) {
// create and add a GPU worker
auto gpu_id = worker_to_gpu.at(omp_num);
assert(cuda_contexts_.find(gpu_id) != cuda_contexts_.end());
console->info("> mapping: omp={}\t->\tGPU-device={}", omp_num, gpu_id);
auto rng = master_rng;
rng.split(num_rng_sequences, num_cpu_workers_ + gpu_worker_id);
auto w = new gpu_worker_t(gpu_conf, G, rng, cuda_contexts_.at(gpu_id));
w->rng_setup(
master_rng, num_rng_sequences,
gpu_seq_offset + gpu_worker_id * num_gpu_threads_per_worker);
workers.push_back(w);
++gpu_worker_id;
} else
#endif
{
// create and add a CPU worker
// console->info("> mapping: omp={}\t->\tCPU", omp_num);
// console->info("cpu_worker_id = {}", cpu_worker_id);
auto rng = master_rng;
rng.split(num_rng_sequences, cpu_worker_id);
workers.push_back(new cpu_worker_t(G, rng));
++cpu_worker_id;
}
}
// console->info("Configured");
}
StreamingRRRGenerator(StreamingRRRGenerator &&O)
: num_cpu_workers_(O.num_cpu_workers_),
num_gpu_workers_(O.num_gpu_workers_),
max_batch_size_(O.max_batch_size_),
console(std::move(O.console)),
#if RIPPLES_ENABLE_CUDA
cuda_contexts_(std::move(O.cuda_contexts_)),
#endif
workers(std::move(O.workers)),
mpmc_head(O.mpmc_head.load()),
#if CUDA_PROFILE
prof_bd(std::move(O.prof_bd)),
#endif
record_(O.record_) {
}
  //! Destructor: dumps profiling counters (when CUDA_PROFILE is enabled)
  //! and releases all owned workers.
  ~StreamingRRRGenerator() {
#if CUDA_PROFILE
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(prof_bd.d);
    console->info("*** BEGIN Streaming Engine profiling");
    // Per-iteration breakdown: one prof_bd entry per generate() call.
    for (size_t i = 0; i < prof_bd.prof_bd.size(); ++i) {
      console->info("+++ BEGIN iter {}", i);
      console->info("--- CPU workers");
      // NOTE(review): cpu_workers / gpu_workers are not among the members
      // visible in this chunk — presumably declared elsewhere in the class;
      // confirm before relying on them.
      for (auto &wp : cpu_workers) wp->print_prof_iter(i);
#ifdef RIPPLES_ENABLE_CUDA
      console->info("--- GPU workers");
      for (auto &wp : gpu_workers) wp->print_prof_iter(i);
#endif
      console->info("--- overall");
      auto &p(prof_bd.prof_bd[i]);
      // NOTE(review): this `ms` shadows the function-scope `ms` above.
      auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(p.d_);
      console->info("n. sets = {}", p.n_);
      console->info("elapsed (ns) = {}", p.d_.count());
      console->info("throughput (sets/sec) = {}",
                    (float)p.n_ * 1e03 / ms.count());
      console->info("+++ END iter {}", i);
      // execution record
      for (auto &wp : workers) {
        wp->prof_record(record_.WalkIterations[i], i);
      }
    }
    console->info("--- overall");
    console->info("n. sets = {}", prof_bd.n);
    console->info("n. iters = {}", prof_bd.prof_bd.size());
    console->info("elapsed (ms) = {}", ms.count());
    console->info("throughput (sets/sec) = {}",
                  (float)prof_bd.n * 1e03 / ms.count());
    console->info("*** END Streaming Engine profiling");
#endif
    // Workers are owned raw pointers; release them all.
    for (auto &w : workers) delete w;
#ifdef RIPPLES_ENABLE_CUDA
    // for (auto &m : cuda_contexts_) cuda_destroy_ctx(m.second);
#endif
  }
  //! Accessor for the shared execution record used for bookkeeping.
  IMMExecutionRecord &execution_record() { return record_; }
  //! Generate RRR sets for the output range [begin, end).
  //! One OpenMP thread is spawned per configured worker; all threads pull
  //! work via the shared atomic counter mpmc_head.
  void generate(ItrTy begin, ItrTy end) {
#if CUDA_PROFILE
    auto start = std::chrono::high_resolution_clock::now();
    for (auto &w : workers) w->begin_prof_iter();
    record_.WalkIterations.emplace_back();
#endif
    // Reset the shared work counter for this batch.
    mpmc_head.store(0);
#pragma omp parallel num_threads(num_cpu_workers_ + num_gpu_workers_)
    {
      // Thread rank indexes directly into the workers vector.
      size_t rank = omp_get_thread_num();
      workers[rank]->svc_loop(mpmc_head, begin, end);
    }
#if CUDA_PROFILE
    // Fold this batch into the aggregate and per-iteration profiles.
    auto d = std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::high_resolution_clock::now() - start);
    prof_bd.prof_bd.emplace_back(std::distance(begin, end), d);
    prof_bd.n += std::distance(begin, end);
    prof_bd.d += std::chrono::duration_cast<std::chrono::microseconds>(d);
    auto &ri(record_.WalkIterations.back());
    ri.NumSets = std::distance(begin, end);
    ri.Total = std::chrono::duration_cast<decltype(ri.Total)>(d);
#endif
  }
  //! True when at least one GPU worker was configured.
  bool isGpuEnabled() const { return num_gpu_workers_ != 0; }
 private:
  size_t num_cpu_workers_, num_gpu_workers_;
  size_t max_batch_size_;
  std::shared_ptr<spdlog::logger> console;
#ifdef RIPPLES_ENABLE_CUDA
  // One CUDA context per physical GPU, shared among that GPU's workers.
  std::unordered_map<size_t, std::shared_ptr<cuda_ctx<GraphTy>>> cuda_contexts_;
#endif
  // Owned raw pointers; released in the destructor.
  std::vector<worker_t *> workers;
  // Shared head of the work queue consumed by all workers in generate().
  std::atomic<size_t> mpmc_head{0};
#if CUDA_PROFILE
  // One sample per generate() call: number of sets and elapsed time.
  struct iter_profile_t {
    iter_profile_t(size_t n, std::chrono::nanoseconds d) : n_(n), d_(d) {}
    size_t n_{0};
    std::chrono::nanoseconds d_{0};
  };
  // Aggregate totals plus the per-iteration samples.
  struct profile_t {
    size_t n{0};
    std::chrono::microseconds d{0};
    std::vector<iter_profile_t> prof_bd;
  };
  profile_t prof_bd;
#endif
  IMMExecutionRecord &record_;
};
} // namespace ripples
#endif // RIPPLES_STREAMING_RRR_GENERATOR_H
|
target_teams_distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// File-scope directives are invalid: there is no associated statement.
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd foo
// The directive must be followed by a for loop.
void test_no_clause() {
  int i;
#pragma omp target teams distribute parallel for simd
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp target teams distribute parallel for simd' must be a for loop}}
#pragma omp target teams distribute parallel for simd
  ++i;
}
// Control flow may not escape the OpenMP region: gotos to outer labels and
// returns are rejected; jumps entirely inside the region are fine.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target teams distribute parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown clauses are diagnosed as ignored extra tokens.
void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation after the directive or a clause is ignored with a warning.
void test_non_identifiers() {
  int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();

// Exercise the collapse() clause: malformed argument lists, non-constant and
// non-positive arguments, and checking of the required loop-nest depth.
void test_collapse() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
  // expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
#pragma omp target teams distribute parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
  // expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp target teams distribute parallel for simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
  for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute parallel for simd' directive may not be firstprivate, predetermined as lastprivate}}
    for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// Exercise the private() clause: malformed argument lists, non-variable
// arguments, and valid single/multiple variable lists.
void test_private() {
  int i;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target teams distribute parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Exercise the lastprivate() clause: malformed argument lists, non-variable
// arguments, and valid single/multiple variable lists.
void test_lastprivate() {
  int i;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target teams distribute parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercise the firstprivate() clause, its conflict with lastprivate, and the
// simdlen/safelen consistency check.
void test_firstprivate() {
  int i;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
  // expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target teams distribute parallel for simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}
// Loop iteration variables of floating-point type are rejected.
void test_loop_messages() {
  float a[100], b[100], c[100];
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
// Exercise the nontemporal() clause (OpenMP 5.0): rejected entirely under
// -fopenmp-version=45, and checked for malformed lists, duplicates, and
// combination with privatization clauses under 5.0.
void test_nontemporal() {
  int i;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp target teams distribute parallel for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp target teams distribute parallel for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp target teams distribute parallel for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp target teams distribute parallel for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
  // omp50-note@+2 {{defined as nontemporal}}
  // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp target teams distribute parallel for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp target teams distribute parallel for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
}
|
solver.c | #include "ns/solver.h"
#include "ns/config.h"
#include <stdlib.h>
#include <stdio.h>
// Data wrapper
// Solver state: grid geometry, fluid parameters, and the field matrices.
typedef struct ns_t {
    // World (interior sizes; the *_bounds variants include the two extra
    // boundary rows/columns)
    uint64_t world_width;
    uint64_t world_width_bounds;
    uint64_t world_height;
    uint64_t world_height_bounds;
    // Fluid
    double viscosity;
    double density;      // amount added per ns_increase_density call
    double diffusion;
    // Time
    double time_step;
    // World data: [world_height_bounds][world_width_bounds] matrices;
    // the *_prev matrices hold the previous step's values / scratch data
    double **u;          // horizontal velocity component
    double **u_prev;
    double **v;          // vertical velocity component
    double **v_prev;
    double **dense;      // density field
    double **dense_prev;
} ns_t;
/**
* Private definitions
*/
static void ns_velocity_step(ns_t *ns);
static void ns_density_step(ns_t *ns);
static void ns_add_sources_to_targets(const ns_t *ns);
static void
ns_diffuse(const ns_t *ns, uint64_t bounds, double diffusion_value, double **target, const double **source);
static void ns_project(ns_t *ns);
static void ns_advect(const ns_t *ns, uint64_t bounds, double **d, double **d0, double **u, double **v);
static void ns_set_bounds(const ns_t *ns, uint64_t bounds, double **target);
static void ns_swap_matrix(double ***x, double ***y);
static bool is_valid_coordinate(const ns_t *ns, uint64_t x, uint64_t y);
/**
* END Private definitions
*/
/**
* Public
*/
ns_t *ns_create(uint64_t world_width, uint64_t world_height,
double viscosity, double density, double diffusion,
double time_step) {
uint64_t i;
bool error = false;
ns_t *ns = NULL;
ns = (ns_t *) malloc(sizeof(ns_t));
if (ns == NULL) return NULL;
// World
ns->world_width = world_width;
ns->world_width_bounds = ns->world_width + 2;
ns->world_height = world_height;
ns->world_height_bounds = ns->world_height + 2;
// Fluid
ns->viscosity = viscosity;
ns->density = density;
ns->diffusion = diffusion;
// Time
ns->time_step = time_step;
// Allocate world data
ns->u = (double **) calloc(ns->world_height_bounds, sizeof(double *));
ns->u_prev = (double **) calloc(ns->world_height_bounds, sizeof(double *));
ns->v = (double **) calloc(ns->world_height_bounds, sizeof(double *));
ns->v_prev = (double **) calloc(ns->world_height_bounds, sizeof(double *));
ns->dense = (double **) calloc(ns->world_height_bounds, sizeof(double *));
ns->dense_prev = (double **) calloc(ns->world_height_bounds, sizeof(double *));
if (ns->u == NULL || ns->u_prev == NULL
|| ns->v == NULL || ns->v_prev == NULL
|| ns->dense == NULL || ns->dense_prev == NULL) {
error = true;
}
if (!error) {
#pragma omp parallel for \
schedule(DEFAULT_OPEN_MP_SCHEDULE) \
default(none) private(i) shared(ns, error)
for (i = 0; i < ns->world_height_bounds; ++i) {
if (error) continue;
ns->u[i] = (double *) calloc(ns->world_width_bounds, sizeof(double));
ns->u_prev[i] = (double *) calloc(ns->world_width_bounds, sizeof(double));
ns->v[i] = (double *) calloc(ns->world_width_bounds, sizeof(double));
ns->v_prev[i] = (double *) calloc(ns->world_width_bounds, sizeof(double));
ns->dense[i] = (double *) calloc(ns->world_width_bounds, sizeof(double));
ns->dense_prev[i] = (double *) calloc(ns->world_width_bounds, sizeof(double));
if (ns->u[i] == NULL || ns->u_prev[i] == NULL
|| ns->v[i] == NULL || ns->v_prev[i] == NULL
|| ns->dense[i] == NULL || ns->dense_prev[i] == NULL) {
#pragma omp critical
error = true;
}
}
}
if (error) {
ns_free(ns);
return NULL;
}
return ns;
}
/*
 * Release a solver created by ns_create.  Safe on NULL and on partially
 * constructed instances: the row-pointer arrays are zero-initialised and
 * free(NULL) is a no-op.
 */
void ns_free(ns_t *ns) {
    if (ns == NULL) return;
    uint64_t i;
    // Serial loop: freeing in an OpenMP parallel region only adds thread
    // start-up overhead since the allocator serialises internally.
    for (i = 0; i < ns->world_height_bounds; ++i) {
        if (ns->u != NULL) free(ns->u[i]);
        if (ns->u_prev != NULL) free(ns->u_prev[i]);
        if (ns->v != NULL) free(ns->v[i]);
        if (ns->v_prev != NULL) free(ns->v_prev[i]);
        if (ns->dense != NULL) free(ns->dense[i]);
        if (ns->dense_prev != NULL) free(ns->dense_prev[i]);
    }
    free(ns->u);
    free(ns->u_prev);
    free(ns->v);
    free(ns->v_prev);
    free(ns->dense);
    free(ns->dense_prev);
    free(ns);
}
/*
 * Advance the simulation by one time step: update the velocity field
 * first, then transport the density field through it.
 */
void ns_tick(ns_t *ns) {
    ns_velocity_step(ns);
    ns_density_step(ns);
}
/*
 * Add one unit of configured density at interior cell (x, y).
 * Coordinates are interior-relative; they are shifted by one to account
 * for the boundary layer.  Returns false (with a message on stderr) when
 * the coordinates fall outside the allocated world.
 */
bool ns_increase_density(ns_t *ns, uint64_t x, uint64_t y) {
    // Fix due to bounds
    x += 1;
    y += 1;
    if (!is_valid_coordinate(ns, x, y)) {
        // %ld with uint64_t is undefined behaviour; PRIu64 is portable.
        fprintf(stderr, "Invalid increase_density coordinates {x: %" PRIu64 ", y: %" PRIu64 "}\n", x, y);
        return false;
    }
    ns->dense[y][x] += ns->density;
    return true;
}
/*
 * Set the velocity at interior cell (x, y).  A zero component leaves the
 * corresponding existing velocity untouched.  Returns false on invalid
 * coordinates or on a component exceeding NS_MAX_FORCE_VELOCITY; both
 * diagnostics go to stderr (the velocity error previously went to stdout,
 * inconsistently with the coordinate error).
 */
bool ns_apply_force(ns_t *ns, uint64_t x, uint64_t y, double v_x, double v_y) {
    // Fix due to bounds
    x += 1;
    y += 1;
    if (!is_valid_coordinate(ns, x, y)) {
        // %ld with uint64_t is undefined behaviour; PRIu64 is portable.
        fprintf(stderr, "Invalid apply_force coordinates {x: %" PRIu64 ", y: %" PRIu64 "}\n", x, y);
        return false;
    }
    if (v_x > NS_MAX_FORCE_VELOCITY || v_y > NS_MAX_FORCE_VELOCITY) {
        fprintf(stderr, "Invalid apply_force velocity {v_x: %lf, v_y: %lf}\n", v_x, v_y);
        return false;
    }
    ns->u[y][x] = v_x != 0 ? v_x : ns->u[y][x];
    ns->v[y][x] = v_y != 0 ? v_y : ns->v[y][x];
    return true;
}
/*
 * Build a snapshot of the world as a matrix of cell views.  Each cell
 * holds pointers into the solver's own u/v/density matrices, so the
 * snapshot tracks the solver until released with ns_free_world.
 * Returns NULL on allocation failure (the original dereferenced the
 * unchecked malloc/calloc results).
 */
ns_world_t *ns_get_world(const ns_t *ns) {
    uint64_t x, y;
    ns_world_t *world = malloc(sizeof *world);
    if (world == NULL) return NULL;
    world->world_width = ns->world_width;
    world->world_width_bounds = ns->world_width_bounds;
    world->world_height = ns->world_height;
    world->world_height_bounds = ns->world_height_bounds;
    world->world = calloc(ns->world_height_bounds, sizeof(ns_cell_t *));
    if (world->world == NULL) {
        free(world);
        return NULL;
    }
    // Row allocation is serial so failures can be handled cleanly; rows
    // not yet allocated remain NULL, which ns_free_world tolerates.
    for (y = 0; y < ns->world_height_bounds; ++y) {
        world->world[y] = calloc(ns->world_width_bounds, sizeof(ns_cell_t));
        if (world->world[y] == NULL) {
            ns_free_world(world);
            return NULL;
        }
    }
    // Filling the cells is pure pointer arithmetic and parallelises well.
#pragma omp parallel for collapse(2) \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(y, x) shared(ns, world)
    for (y = 0; y < ns->world_height_bounds; ++y) {
        for (x = 0; x < ns->world_width_bounds; ++x) {
            world->world[y][x].u = &ns->u[y][x];
            world->world[y][x].v = &ns->v[y][x];
            world->world[y][x].density = &ns->dense[y][x];
        }
    }
    return world;
}
/*
 * Release a snapshot returned by ns_get_world.  Only the snapshot
 * structure is freed; the cells merely point into the solver's matrices.
 */
void ns_free_world(ns_world_t *world) {
    uint64_t row;
#pragma omp parallel for \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(row) shared(world)
    for (row = 0; row < world->world_height_bounds; ++row) {
        free(world->world[row]);
    }
    free(world->world);
    free(world);
}
/**
* END Public
*/
/**
* Private
*/
/*
 * One velocity update: add external sources, diffuse both components,
 * project to a divergence-free field, self-advect, project again.
 * Each swap feeds the freshly computed field into the next stage as its
 * *_prev input, so the exact call order here is load-bearing.
 */
static void ns_velocity_step(ns_t *ns) {
    ns_add_sources_to_targets(ns);
    ns_swap_matrix(&ns->u_prev, &ns->u);
    ns_diffuse(ns, 1, ns->viscosity, ns->u, (const double **) ns->u_prev);
    ns_swap_matrix(&ns->v_prev, &ns->v);
    ns_diffuse(ns, 2, ns->viscosity, ns->v, (const double **) ns->v_prev);
    ns_project(ns);
    ns_swap_matrix(&ns->u_prev, &ns->u);
    ns_swap_matrix(&ns->v_prev, &ns->v);
    // Advect each component along the (projected) previous velocity field.
    ns_advect(ns, 1, ns->u, ns->u_prev, ns->u_prev, ns->v_prev);
    ns_advect(ns, 2, ns->v, ns->v_prev, ns->u_prev, ns->v_prev);
    ns_project(ns);
}
/*
 * One density update: diffuse the density field, then advect it along
 * the current velocity field.  The swaps make the latest result the
 * dense_prev input of the following stage.
 */
static void ns_density_step(ns_t *ns) {
    ns_swap_matrix(&ns->dense_prev, &ns->dense);
    ns_diffuse(ns, 0, ns->diffusion, ns->dense, (const double **) ns->dense_prev);
    ns_swap_matrix(&ns->dense_prev, &ns->dense);
    ns_advect(ns, 0, ns->dense, ns->dense_prev, ns->u, ns->v);
}
/*
 * Integrate the external source terms (held in u_prev / v_prev) into the
 * velocity fields, scaled by the time step.
 */
static void ns_add_sources_to_targets(const ns_t *ns) {
    uint64_t row, col;
#pragma omp parallel for collapse(2) \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(row, col) shared(ns)
    for (row = 0; row < ns->world_height_bounds; ++row) {
        for (col = 0; col < ns->world_width_bounds; ++col) {
            ns->u[row][col] += ns->time_step * ns->u_prev[row][col];
            ns->v[row][col] += ns->time_step * ns->v_prev[row][col];
        }
    }
}
/*
 * Diffuse `source` into `target` with 20 Gauss-Seidel relaxation sweeps.
 * The update is in place, so the row-major traversal order is part of
 * the numerical scheme and must not be parallelised or reordered.
 */
static void
ns_diffuse(const ns_t *ns, uint64_t bounds, double diffusion_value, double **target, const double **source) {
    const double a = ns->time_step * diffusion_value * (double) ns->world_width * (double) ns->world_height;
    for (uint64_t sweep = 0; sweep < 20; ++sweep) {
        for (uint64_t row = 1; row <= ns->world_height; ++row) {
            for (uint64_t col = 1; col <= ns->world_width; ++col) {
                target[row][col] =
                        (source[row][col] + a * (target[row][col - 1] + target[row][col + 1] + target[row - 1][col] + target[row + 1][col]))
                        / (1 + 4 * a);
            }
        }
        ns_set_bounds(ns, bounds, target);
    }
}
/*
 * Project the velocity field onto its divergence-free component.
 * Scratch usage: v_prev temporarily holds the scaled, negated divergence
 * and u_prev the pressure-like field solved by 20 Gauss-Seidel sweeps;
 * both are overwritten.  The gradient of the solved field is finally
 * subtracted from (u, v) and boundaries are re-imposed.
 */
static void ns_project(ns_t *ns) {
    uint64_t x, y;
    // Grid spacing; note it is derived from the width only.
    double h = 1.0 / (double) ns->world_width;
    // Divergence of (u, v) into v_prev; u_prev cleared as initial guess.
    for (y = 1; y <= ns->world_height; ++y) {
        for (x = 1; x <= ns->world_width; ++x) {
            ns->v_prev[y][x] = -0.5 * h
                               * (ns->u[y][x + 1] - ns->u[y][x - 1] + ns->v[y + 1][x] - ns->v[y - 1][x]);
            ns->u_prev[y][x] = 0;
        }
    }
    ns_set_bounds(ns, 0, ns->v_prev);
    ns_set_bounds(ns, 0, ns->u_prev);
    // 20 Gauss-Seidel sweeps (fixed count, no convergence test); the
    // in-place update makes this inherently sequential.
    for (uint64_t k = 0; k < 20; k++) {
        for (y = 1; y <= ns->world_height; ++y) {
            for (x = 1; x <= ns->world_width; ++x) {
                ns->u_prev[y][x] =
                        (ns->v_prev[y][x]
                         + ns->u_prev[y][x - 1] + ns->u_prev[y][x + 1] + ns->u_prev[y - 1][x] + ns->u_prev[y + 1][x])
                        / 4;
            }
        }
        ns_set_bounds(ns, 0, ns->u_prev);
    }
    // Subtract the gradient of the solved field from the velocities.
#pragma omp parallel for collapse(2) \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(y, x) shared(ns, h)
    for (y = 1; y <= ns->world_height; ++y) {
        for (x = 1; x <= ns->world_width; ++x) {
            ns->u[y][x] -= 0.5 * (ns->u_prev[y][x + 1] - ns->u_prev[y][x - 1]) / h;
            ns->v[y][x] -= 0.5 * (ns->u_prev[y + 1][x] - ns->u_prev[y - 1][x]) / h;
        }
    }
    ns_set_bounds(ns, 1, ns->u);
    ns_set_bounds(ns, 2, ns->v);
}
/*
 * Semi-Lagrangian advection: for each interior cell, trace the velocity
 * (u, v) backwards over one time step, clamp the source point to the
 * valid range, and bilinearly interpolate d0 there into d.
 */
static void ns_advect(const ns_t *ns, uint64_t bounds, double **d, double **d0, double **u, double **v) {
    uint64_t x, y, x0, x1, y0, y1;
    double xx, yy, s0, s1, t0, t1;
    // Backtrace distances expressed in cell units, per axis.
    double dt0_width = ns->time_step * (double) ns->world_width;
    double dt0_height = ns->time_step * (double) ns->world_height;
#pragma omp parallel for collapse(2) \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(y, x, yy, xx, x0, x1, y0, y1, s0, s1, t0, t1) shared(ns, dt0_width, dt0_height, u, v, d, d0)
    for (y = 1; y <= ns->world_height; ++y) {
        for (x = 1; x <= ns->world_width; ++x) {
            // Source position of the backtraced particle.
            xx = (double) x - dt0_width * u[y][x];
            yy = (double) y - dt0_height * v[y][x];
            // Check xx: clamp to [0.5, width + 0.5]
            if (xx < 0.5)
                xx = 0.5;
            if (xx > (double) ns->world_width + 0.5)
                xx = (double) ns->world_width + 0.5;
            x0 = (uint64_t) xx;
            x1 = x0 + 1;
            // Check yy: clamp to [0.5, height + 0.5]
            if (yy < 0.5)
                yy = 0.5;
            if (yy > (double) ns->world_height + 0.5)
                yy = (double) ns->world_height + 0.5;
            y0 = (uint64_t) yy;
            y1 = y0 + 1;
            // Bilinear interpolation weights.
            s1 = xx - (double) x0;
            s0 = 1 - s1;
            t1 = yy - (double) y0;
            t0 = 1 - t1;
            d[y][x] = s0 * (t0 * d0[y0][x0] + t1 * d0[y1][x0])
                      + s1 * (t0 * d0[y0][x1] + t1 * d0[y1][x1]);
        }
    }
    ns_set_bounds(ns, bounds, d);
}
/*
 * Re-impose boundary conditions on `target`.  bounds == 1 mirrors
 * (negates) the left/right edges, bounds == 2 mirrors the top/bottom
 * edges; any other value copies the adjacent interior cell.  Corners are
 * the average of their two edge neighbours.
 */
static void ns_set_bounds(const ns_t *ns, uint64_t bounds, double **target) {
    uint64_t y;
    uint64_t x;
    // The previous implementation iterated over the whole interior with
    // collapse(2), rewriting every edge cell width (resp. height) times
    // and racing on those identical writes.  Two independent edge sweeps
    // do the same work in O(width + height).
#pragma omp parallel for \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(y) shared(ns, target, bounds)
    for (y = 1; y <= ns->world_height; ++y) {
        target[y][0] = (bounds == 1) ? -target[y][1] : target[y][1];
        target[y][ns->world_width + 1] = bounds == 1 ? -target[y][ns->world_width] : target[y][ns->world_width];
    }
#pragma omp parallel for \
    schedule(DEFAULT_OPEN_MP_SCHEDULE) \
    default(none) private(x) shared(ns, target, bounds)
    for (x = 1; x <= ns->world_width; ++x) {
        target[0][x] = bounds == 2 ? -target[1][x] : target[1][x];
        target[ns->world_height + 1][x] = bounds == 2 ? -target[ns->world_height][x] : target[ns->world_height][x];
    }
    // Corners: average of the two adjacent edge cells.
    target[0][0] = 0.5 * (target[0][1] + target[1][0]);
    target[ns->world_height + 1][0] = 0.5 * (target[ns->world_height + 1][1] + target[ns->world_height][0]);
    target[0][ns->world_width + 1] = 0.5 * (target[0][ns->world_width] + target[1][ns->world_width + 1]);
    target[ns->world_height + 1][ns->world_width + 1] =
            0.5 * (target[ns->world_height + 1][ns->world_width] + target[ns->world_height][ns->world_width + 1]);
}
/* Exchange two matrix handles in O(1) by swapping the top-level pointers. */
static void ns_swap_matrix(double ***x, double ***y) {
    double **held = *y;
    *y = *x;
    *x = held;
}
/*
 * Return true when (x, y) lies inside the bounded world grid.
 * Fix: x and y are unsigned, so the old `x >= 0` / `y >= 0` tests were
 * tautologies (and trigger "comparison always true" warnings); only the
 * upper bounds need checking.
 */
static bool is_valid_coordinate(const ns_t *ns, uint64_t x, uint64_t y) {
    return x < ns->world_width_bounds && y < ns->world_height_bounds;
}
/**
* END Private
*/
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a timeval. NOTE: *y is used as scratch space
 * and is modified by this call. Returns 1 if the difference is negative,
 * otherwise 0 (the classic GNU libc elapsed-time idiom). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds in the gap back into y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the result is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x ended up earlier than the adjusted y. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
error.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB BT code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <math.h>
#include "header.h"
//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
void error_norm(double rms[5])
{
int i, j, k, m, d;
double xi, eta, zeta, u_exact[5], add;
// Per-thread partial sums of squared differences; merged into rms[] below.
double rms_local[5];
for (m = 0; m < 5; m++) {
rms[m] = 0.0;
}
#pragma omp parallel default(shared) \
private(i,j,k,m,zeta,eta,xi,add,u_exact,rms_local) shared(rms)
{
for (m = 0; m < 5; m++) {
rms_local[m] = 0.0;
}
// nowait is safe here: each thread folds only its own rms_local into rms
// (under atomic), and the parallel region's closing barrier orders every
// update before the serial normalization that follows.
#pragma omp for nowait
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)(i) * dnxm1;
// Analytic solution for all 5 components at this grid point.
exact_solution(xi, eta, zeta, u_exact);
for (m = 0; m < 5; m++) {
add = u[k][j][i][m]-u_exact[m];
rms_local[m] = rms_local[m] + add*add;
}
}
}
}
// Combine the per-thread sums into the shared accumulator.
for (m = 0; m < 5; m++) {
#pragma omp atomic
rms[m] += rms_local[m];
}
} //end parallel
// Normalize by the interior point count of each dimension, then take the
// square root to obtain the RMS value per component.
for (m = 0; m < 5; m++) {
for (d = 0; d < 3; d++) {
rms[m] = rms[m] / (double)(grid_points[d]-2);
}
rms[m] = sqrt(rms[m]);
}
}
//---------------------------------------------------------------------
// Compute, for each of the 5 equations, the RMS norm of the right-hand
// side vector rhs over the interior grid points.
//---------------------------------------------------------------------
void rhs_norm(double rms[5])
{
int i, j, k, d, m;
double add;
// Per-thread partial sums of squared rhs entries; merged into rms[] below.
double rms_local[5];
for (m = 0; m < 5; m++) {
rms[m] = 0.0;
}
#pragma omp parallel default(shared) private(i,j,k,m,add,rms_local) \
shared(rms)
{
for (m = 0; m < 5; m++) {
rms_local[m] = 0.0;
}
// nowait is safe: the atomic merge below touches only thread-private
// data, and the region's closing barrier precedes the serial code after.
#pragma omp for nowait
for (k = 1; k <= grid_points[2]-2; k++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (m = 0; m < 5; m++) {
add = rhs[k][j][i][m];
rms_local[m] = rms_local[m] + add*add;
}
}
}
}
// Combine the per-thread sums into the shared accumulator.
for (m = 0; m < 5; m++) {
#pragma omp atomic
rms[m] += rms_local[m];
}
} //end parallel
// Normalize by interior point counts and take the square root.
for (m = 0; m < 5; m++) {
for (d = 0; d < 3; d++) {
rms[m] = rms[m] / (double)(grid_points[d]-2);
}
rms[m] = sqrt(rms[m]);
}
}
|
decoder.c | /*! @file
* @brief
*
* @version 1.0.0
*
* (C) Copyright 2017 GoPro Inc (http://gopro.com/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "config.h"
#include "timing.h"
#if WARPSTUFF
#include "WarpLib.h"
#endif
//#include <stdlib.h>
#include <stddef.h>
#include <math.h>
#include <memory.h>
#include <time.h>
//#include <stdint.h>
#ifndef DEBUG
#define DEBUG (1 && _DEBUG)
#endif
#ifndef TIMING
#define TIMING (1 && _TIMING)
#endif
#ifndef XMMOPT
#define XMMOPT (1 && _XMMOPT)
#endif
#define GEN_LICENSE 0
#ifndef PI
#define PI 3.14159265359f
#endif
#ifdef _WINDOWS
#include <windows.h>
#elif __APPLE__
#include "macdefs.h"
#else
#ifndef ZeroMemory
#define ZeroMemory(p,s) memset(p,0,s)
#endif
#endif
#include <stdio.h>
#include <assert.h>
#include <emmintrin.h> // Intel aligned alloc and free
#include "dump.h"
#include "decoder.h"
#include "codec.h"
#include "vlc.h"
#include "codebooks.h" // References to the codebooks
#include "debug.h"
#include "color.h" // Color formats supported by image processing routines
#include "image.h"
#include "filter.h"
#include "spatial.h"
#include "temporal.h"
//#include "logo40x5.h"
#include "convert.h"
#include "wavelet.h"
#include "bitstream.h"
#include "frame.h"
#include "cpuid.h"
#include "bayer.h"
#include "metadata.h"
#include "DemoasicFrames.h" //TODO: Change filename to lower case
#include "swap.h"
#include "draw.h"
#include "RGB2YUV.h"
#include "lutpath.h"
#include "exception.h"
extern void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height, int y, float r1, float r2, float gain,
int16_t *sptr, int resolution, int pixelsize);
extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize);
extern void FastSharpeningBlurVWP13(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
extern void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
#ifdef SPI_LOADER
#include "spi.h"
#include "keyframes.h"
#endif
#ifndef DUMP
#define DUMP (0 && _DUMP)
#endif
#define ERROR_TOLERANT 1
#if defined(_WINDOWS) && DEBUG
#include <tchar.h> // For printing debug string in the console window
#endif
#define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform
#define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform
#if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch
#if _DECODE_TRANSFORM == 0
#define _DECODE_TRANSFORM 1
#endif
#endif
#ifndef _FSMBUFFER
#define _FSMBUFFER 0
#endif
// Turn off saturation in this file
#ifdef SATURATE
#undef SATURATE
#endif
#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x))
#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x))
//#define SATURATE8S(x) SATURATE_8S(x)
//#define SATURATE(x) (x)
// Enable or disable function inlining
#if 1 //DEBUG
#define inline
#else
#define inline __forceinline
#endif
// Pixel size used for computing the compression ratio
#define BITS_PER_PIXEL 8
// Default processor capabilities
#define DEFAULT_FEATURES (_CPU_FEATURE_MMX )
#define DEMOSAIC_DELAYLINES 4
// Forward references
void AllocDecoderGroup(DECODER *decoder);
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format);
void EraseDecoderFrames(DECODER *decoder);
TRANSFORM *AllocGroupTransform(GROUP *group, int channel);
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format);
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile);
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch);
#endif
bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset,
int precision);
extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index,
uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading,
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes);
//extern void ComputeCube(DECODER *decoder);
extern bool NeedCube(DECODER *decoder);
extern void LoadTweak();
//extern int g_topdown;
//extern int g_bottomup;
// Performance measurements
#if _TIMING
extern TIMER tk_decompress; // Timers
extern TIMER tk_decoding;
extern TIMER tk_convert;
extern TIMER tk_inverse;
extern COUNTER decode_byte_count; // Counters
extern COUNTER sample_byte_count;
extern COUNTER alloc_group_count;
extern COUNTER alloc_transform_count;
extern COUNTER alloc_buffer_count;
extern COUNTER spatial_decoding_count;
extern COUNTER temporal_decoding_count;
extern COUNTER progressive_decode_count;
#endif
#if 0
// Table that maps from decoded format to pixel size
static const int PixelSize[] =
{
0, // DECODED_FORMAT_UNSUPPORTED
2, // DECODED_FORMAT_YUYV
2, // DECODED_FORMAT_UYVY
2, // DECODED_FORMAT_420
4, // DECODED_FORMAT_RGB32
3, // DECODED_FORMAT_RGB24
2, // DECODED_FORMAT_RGB555
2, // DECODED_FORMAT_RGB565
#if 0
2, // DECODED_FORMAT_YUYV_INVERTED
2, // DECODED_FORMAT_UYVY_INVERTED
2, // DECODED_FORMAT_420_INVERTED
#endif
4, // DECODED_FORMAT_RGB32_INVERTED
3, // DECODED_FORMAT_RGB24_INVERTED
2, // DECODED_FORMAT_RGB555_INVERTED
2, // DECODED_FORMAT_RGB565_INVERTED
3, // DECODED_FORMAT_V210,
4, // DECODED_FORMAT_YU64, // Custom 16 bits per channel (all data scaled up) YUYV format.
4, // DECODED_FORMAT_YR16 // Rows of YUV with 16 bits per channel
};
#if _DEBUG
char *decoded_format_string[] =
{
"Unsupported",
"YUYV",
"UYUV",
"420",
"RGB32",
"RGB24",
"RGB555",
"RGB565",
#if 0
"YUYV Inverted",
"UYVY Inverted",
"420 Inverted",
#endif
//#if BUILD_PROSPECT
"RGB32 Inverted",
"RGB24 Inverted",
"RGB555 Inverted",
"RGB565 Inverted",
"V210"
//#endif
};
#endif
#else
// Bytes per pixel for the low-numbered color formats, indexed by format code.
static const int pixel_size_table[] =
{
    0, // COLOR_FORMAT_UNKNOWN
    2, // COLOR_FORMAT_UYVY
    2, // COLOR_FORMAT_YUYV
    2, // COLOR_FORMAT_YVYU
    0, // COLOR_FORMAT_YV12
    0, // COLOR_FORMAT_I420
    2, // COLOR_FORMAT_RGB16
    3, // COLOR_FORMAT_RGB24
    4, // COLOR_FORMAT_RGB32
    0,
    3, // COLOR_FORMAT_V210
    0, // COLOR_FORMAT_RGB10
    4, // COLOR_FORMAT_YU64
    4, // COLOR_FORMAT_YR16
    4, // COLOR_FORMAT_YUVA
};
static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]);
// Return the size in bytes of one pixel of the specified color format,
// or 0 when the format is unknown or has no per-pixel byte size.
static int PixelSize(int format)
{
    // Small format codes are resolved by direct table lookup.
    if (0 <= format && format < pixel_size_table_length)
        return pixel_size_table[format];
    // Avid-specific formats
    if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
    {
        switch (format)
        {
        case COLOR_FORMAT_CbYCrY_8bit:
        case COLOR_FORMAT_CbYCrY_10bit_2_8: // Only valid for the lower plane
            return 1;
        case COLOR_FORMAT_CbYCrY_16bit:
        case COLOR_FORMAT_CbYCrY_16bit_2_14:
        case COLOR_FORMAT_CbYCrY_16bit_10_6:
            return 2;
        default:
            assert(0);
            return 2; // Assume 16 bits per pixel if the format is unknown
        }
    }
    // Bayer formats: the code apparently encodes the size as (format - 100),
    // capped at 2 bytes -- preserved from the original logic.
    if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
    {
        int bayer_size = format - 100;
        return (bayer_size > 2) ? 2 : bayer_size;
    }
    if (format == COLOR_FORMAT_RG48)
        return 6;
    if (format == COLOR_FORMAT_RG64)
        return 8;
    if (format == COLOR_FORMAT_B64A)
        return 8;
    return 0;
}
#endif
// Return the number of bytes per pixel for a decoded format, or 0 (after a
// debug assert) for formats whose size is not defined per pixel.
int DecodedPixelSize(DECODED_FORMAT format)
{
    switch (format)
    {
    case DECODED_FORMAT_YUYV:
    case DECODED_FORMAT_CT_UCHAR:
        return 2;
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_CT_SHORT:
    case DECODED_FORMAT_CT_SHORT_2_14:
    case DECODED_FORMAT_CT_USHORT_10_6:
    case DECODED_FORMAT_ROW16U:
        return 4;
    case DECODED_FORMAT_RG48:
        return 6;
    case DECODED_FORMAT_CT_10Bit_2_8:
    case DECODED_FORMAT_V210:
        // This routine should not be called to compute the pixel sizes
        // for these packed formats.
        assert(0);
        return 0;
    default:
        assert(0);
        return 0;
    }
}
#if 0
// Convert FOURCC code to a string
static void str4cc(char *string, uint32_t marker)
{
char *p = (char *)&marker + 3;
char *s = string;
int i;
for (i = 0; i < 4; i++)
*(s++) = *(p--);
*s = '\0';
}
#endif
// Greatest common divisor by iterative Euclid; gcd(0, 0) == 0.
static int aspect_gcd(int a, int b)
{
    while (b != 0)
    {
        int r = a % b;
        a = b;
        b = r;
    }
    return a;
}
/*
 * Report the display aspect ratio (*w : *h) of the decoded frame.
 * The decoded frame size is first scaled back to the source resolution,
 * then the ratio is taken from explicit pixel-aspect metadata when
 * available (reduced to lowest terms), otherwise guessed from common
 * video geometries (16:9 for 720/1080 line content, else square pixels).
 *
 * Improvement: the reduction of num:den to lowest terms previously used
 * O(num+den) trial division; Euclid's algorithm yields the identical
 * fully-reduced ratio (and a gcd(0,0) guard avoids dividing by zero in
 * the degenerate all-zero case).
 */
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
    int origw, origh, guess = 0;
    // Undo any decode-resolution scaling so we reason about the source size.
    origw = decoder->frame.width;
    origh = decoder->frame.height;
    switch(decoder->frame.resolution)
    {
    case DECODED_RESOLUTION_FULL:
        break;
    case DECODED_RESOLUTION_HALF:
        origw *= 2;
        origh *= 2;
        break;
    case DECODED_RESOLUTION_QUARTER:
        origw *= 4;
        origh *= 4;
        break;
    case DECODED_RESOLUTION_LOWPASS_ONLY:
        origw *= 8;
        origh *= 8;
        break;
    case DECODED_RESOLUTION_FULL_DEBAYER:
        break;
    case DECODED_RESOLUTION_HALF_NODEBAYER:
        origw *= 2;
        origh *= 2;
        break;
    case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
        origw *= 4;
        origh *= 4;
        break;
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        //origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
        break;
    case DECODED_RESOLUTION_HALF_HORIZONTAL:
        origw *= 2;
        break;
    case DECODED_RESOLUTION_HALF_VERTICAL:
        origh *= 2;
        break;
    }
    // Missing or implausible picture-aspect metadata forces a guess.
    if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
        guess = 1;
    // if guess default values, we can't trust them
    if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
        guess = 1;
    if(decoder->pixel_aspect_x && decoder->pixel_aspect_y)
    {
        int den, num, g;
        decoder->codec.picture_aspect_x = num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
        decoder->codec.picture_aspect_y = den = origh;
        // Reduce num:den to lowest terms.
        g = aspect_gcd(num, den);
        if (g > 1)
        {
            num /= g;
            den /= g;
        }
        decoder->codec.picture_aspect_x = num;
        decoder->codec.picture_aspect_y = den;
        guess = 0;
    }
    if(guess)
    {
        if(origw > 720) //HD.
        {
            if(origh == 1080)
            {
                if(origw == 2048)
                    *w=origw,*h=origh;
                else
                    *w=16,*h=9; // assume 16x9
            }
            else if(origh == 720)
            {
                *w=16,*h=9; // assume 16x9
            }
            else
            {
                *w=origw,*h=origh; // assume square pixel.
            }
        }
        else
        {
            if(origh == 720)
            {
                *w=16,*h=9; // assume 16x9
            }
            else
            {
                *w=origw,*h=origh; // assume square pixel.
            }
        }
    }
    else
    {
        *w=decoder->codec.picture_aspect_x;
        *h=decoder->codec.picture_aspect_y;
    }
}
// Return true when `resolution` is one of the decoded frame resolutions
// supported by the decoder.
bool IsValidFrameResolution(int resolution)
{
    return resolution == DECODED_RESOLUTION_FULL
        || resolution == DECODED_RESOLUTION_HALF
        || resolution == DECODED_RESOLUTION_QUARTER
        || resolution == DECODED_RESOLUTION_LOWPASS_ONLY
        || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL
        || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
// Quarter-resolution decoding is available unconditionally in this build;
// the decoder instance is not consulted.
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
    (void)decoder; // capability does not depend on decoder state
    return true;
}
// Return the number of bytes required to hold one DECODER instance, so
// callers can allocate storage without seeing the structure layout.
size_t DecoderSize()
{
return sizeof(DECODER);
}
// Initialize a decoder instance: zero the structure (preserving any thread
// control parameters already set), install the decoding codebooks from the
// supplied codeset (or the default cs9 codeset when cs is NULL), and reset
// the codec state and scratch buffer. `logfile` may be NULL; it is stored
// for status output during decoding.
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
}
#endif
{
//TODO: Clear the decoder before setting the CPU limit and affinity
int i;
//int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;
//save key params
Thread_cntrl saved_params = decoder->thread_cntrl;
// Clear everything
memset(decoder, 0, sizeof(DECODER));
//restore key params
if(saved_params.set_thread_params == 1) // used by the DShow Interface
{
decoder->thread_cntrl = saved_params;
}
#if _TIMING
InitTiming();
#endif
// Set the file for status information during decoding
decoder->logfile = logfile;
// Initialize the decoding error to no error
decoder->error = CODEC_ERROR_OKAY;
// Most recent marker found during decoding
decoder->marker = 0;
// Count of frames decoded
decoder->frame_count = 0;
// Set the codebooks that will be used for decoding
if (cs != NULL)
{
// Use the codeset provided in the call
for(i=0; i<CODEC_NUM_CODESETS; i++)
{
// Codebook for decoding highpass coefficients
decoder->magsbook[i] = cs[i].magsbook;
// Codebook for decoding runs of coefficients
decoder->runsbook[i] = cs[i].runsbook;
// Lookup table for fast codebook search
decoder->fastbook[i] = cs[i].fastbook;
}
}
else
{
// Use the default codeset
// NOTE(review): only codeset slot 0 is populated here, while the
// cs != NULL path fills all CODEC_NUM_CODESETS slots -- confirm that
// the remaining (zeroed) slots are never dereferenced in this mode.
decoder->magsbook[0] = cs9.magsbook;
decoder->runsbook[0] = cs9.runsbook;
decoder->fastbook[0] = cs9.fastbook;
}
// Initialize the codec state
InitCodecState(&decoder->codec);
InitScratchBuffer(&decoder->scratch, NULL, 0);
#if _DUMP
// Initialize the descriptor for controlling debug output
decoder->dump.enabled = false;
decoder->dump.channel_mask = 0;
decoder->dump.wavelet_mask = 0;
memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
}
//REDTEST
decoder->frm = 0;
decoder->run = 1;
#if _ALLOCATOR
decoder->allocator = NULL;
#endif
decoder->initialized = 1; //DAN20060912
}
// Install a 16-byte license key into the decoder, but only when no license
// has been set yet (an all-zero key field means "unlicensed"). Later calls
// with a different key are ignored. NULL arguments are a no-op.
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
    if (decoder == NULL || licensekey == NULL)
        return;
    {
        const unsigned char unlicensed[16] = {0};
        // Keep only the first license offered.
        if (memcmp(decoder->licensekey, unlicensed, sizeof(decoder->licensekey)) != 0)
            return;
        memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
    }
}
// Free data allocated within the decoder
/*
 * Free every resource owned by the decoder (metadata databases, transforms,
 * scratch and per-thread buffers, codebook structures, worker threads, color
 * LUTs, lens-correction state) and mark it uninitialized. The DECODER
 * structure itself is NOT freed, and the logfile is left open for the caller.
 * Safe to call on a decoder that was never initialized.
 *
 * Fixes (non-_ALLOCATOR build only):
 *  - geomesh_destroy() was passed the undeclared identifier `mesh` instead
 *    of decoder->mesh.
 *  - Four fields were misspelled lastLlens* instead of lastLens*, the
 *    spelling used by the _ALLOCATOR branch of this same function.
 */
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    // Free the transforms allocated in the decoder
    int i;
    if(decoder->initialized == 0)
        return; // nothing to free //DAN20060912
#if _GRAPHICS
    DrawClose(decoder);
#endif
    // Metadata databases, one per priority level.
    for(i=0; i<=METADATA_PRIORITY_MAX; i++)
    {
        if(decoder->DataBases[i])
        {
#if _ALLOCATOR
            Free(decoder->allocator, decoder->DataBases[i]);
#else
            MEMORY_FREE(decoder->DataBases[i]);
#endif
            decoder->DataBases[i] = NULL;
            decoder->DataBasesSize[i] = 0;
            decoder->DataBasesAllocSize[i] = 0;
        }
    }
    if(decoder->sqrttable)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->sqrttable);
#else
        MEMORY_FREE(decoder->sqrttable);
#endif
        decoder->sqrttable = NULL;
    }
    for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
    {
#if _ALLOCATOR
        FreeTransform(allocator, decoder->transform[i]);
#else
        FreeTransform(decoder->transform[i]);
#endif
        decoder->transform[i] = NULL;
    }
    if(decoder->aligned_sample_buffer)
    {
#if _ALLOCATOR
        FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
        decoder->aligned_sample_buffer = NULL;
        decoder->aligned_sample_buffer_size = 0;
    }
    if(decoder->tools)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->tools);
#else
        MEMORY_FREE(decoder->tools);
#endif
        decoder->tools = NULL;
    }
    // Free the buffer allocated for decoding
    if (decoder->buffer != NULL)
    {
#if DEBUG_BUFFER_USAGE
        int i;
        char *ptr = (char *)decoder->buffer;
        FILE *fp = fopen("C:/free.txt", "a");
        fprintf(fp, "decoder->buffer = %08x buffer_size = %d\n", decoder->buffer ,decoder->buffer_size);
        i = decoder->buffer_size-1;
        while(ptr[i] == 1) i--;
        fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size);
        fclose(fp);
#endif
#if _ALLOCATOR
        FreeAligned(allocator, decoder->buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
        decoder->buffer = NULL;
        decoder->buffer_size = 0;
        // Clear the fields in the scratch buffer descriptor
        memset(&decoder->scratch, 0, sizeof(SCRATCH));
        // Eventually the buffer and buffer size fields will be obsolete
    }
    // Per-CPU work buffers.
    for(i=0;i<_MAX_CPUS;i++)
    {
        if(decoder->threads_buffer[i])
        {
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
            MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
            decoder->threads_buffer[i] = NULL;
        }
    }
    decoder->threads_buffer_size = 0;
    // Do not attempt to free the codebooks since the
    // codebook pointers are references to static tables
    // Can free some of the data structures allocated by the decoder
    FreeCodebooks(decoder);
#if _INTERLACED_WORKER_THREADS
    if(decoder->interlaced_worker.lock_init) // threads started
    {
        int i;
        // Signal this thread to stop
        SetEvent(decoder->interlaced_worker.stop_event);
        // Free all handles used by the worker threads
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            WaitForSingleObject(decoder->interlaced_worker.handle[i], INFINITE); //JY20080307
            CloseHandle(decoder->interlaced_worker.handle[i]);
            CloseHandle(decoder->interlaced_worker.start_event[i]);
            CloseHandle(decoder->interlaced_worker.done_event[i]);
        }
        CloseHandle(decoder->interlaced_worker.row_semaphore);
        CloseHandle(decoder->interlaced_worker.stop_event);
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.handle[i] = 0;
            decoder->interlaced_worker.start_event[i] = 0;
            decoder->interlaced_worker.done_event[i] = 0;
        }
        decoder->interlaced_worker.row_semaphore = 0;
        decoder->interlaced_worker.stop_event = 0;
    }
    // Free the critical section used by the worker threads
    DeleteCriticalSection(&decoder->interlaced_worker.lock);
    decoder->interlaced_worker.lock_init = 0;
#endif
#if _THREADED
    if(decoder->entropy_worker_new.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->entropy_worker_new.pool);
        DeleteLock(&decoder->entropy_worker_new.lock);
    }
    if(decoder->worker_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->worker_thread.pool);
        DeleteLock(&decoder->worker_thread.lock);
    }
    if(decoder->draw_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->draw_thread.pool);
        DeleteLock(&decoder->draw_thread.lock);
    }
    /*
    if(decoder->qt_convert_worker.pool.thread_count)
    {
    ThreadPoolDelete(&decoder->qt_convert_worker.pool);
    DeleteLock(&decoder->qt_convert_worker.lock);
    }
    if(decoder->qt_scale_worker.pool.thread_count)
    {
    ThreadPoolDelete(&decoder->qt_scale_worker.pool);
    DeleteLock(&decoder->qt_scale_worker.lock);
    }
    */
    // The nested parallel decoder owns its own thread pool and state.
    if(decoder->parallelDecoder)
    {
        if(decoder->parallelDecoder->decoder_thread.pool.thread_count)
        {
            ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
            DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
            decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
        }
        ClearDecoder(decoder->parallelDecoder);
#if _ALLOCATOR
        Free(decoder->allocator, decoder->parallelDecoder);
#else
        MEMORY_FREE(decoder->parallelDecoder);
#endif
        decoder->parallelDecoder = NULL;
    }
#endif
    //MEMORY_ALIGNED_FREE(RawBayer16);
#if _ALLOCATOR
    if(decoder->RGBFilterBuffer16)
    {
        FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = 0;
        decoder->RGBFilterBufferSize = 0;
    }
    if(decoder->RawBayer16)
    {
        FreeAligned(decoder->allocator, decoder->RawBayer16);
        decoder->RawBayer16 = 0;
        decoder->RawBayerSize = 0;
    }
    if(decoder->StereoBuffer)
    {
        FreeAligned(decoder->allocator, decoder->StereoBuffer);
        decoder->StereoBuffer = 0;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        FreeAligned(decoder->allocator, decoder->RawCube);
        decoder->RawCube = 0;
    }
    if(decoder->Curve2Linear)
    {
        FreeAligned(decoder->allocator, decoder->Curve2Linear);
        decoder->Curve2Linear = 0;
    }
    if(decoder->Linear2CurveRed)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }
    //3d LUT
    {
        if(decoder->LUTcache)
            Free(decoder->allocator, decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }
#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);
        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;
        decoder->lastLensSphere = 0;
        decoder->lastLensFill = 0;
        decoder->lastLensStyleSel = 0;
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;
    }
#endif
    if(decoder->overrideData)
    {
        Free(decoder->allocator, decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }
    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            Free(decoder->allocator, decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#else
    if(decoder->RGBFilterBuffer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = NULL;
    }
    if(decoder->RawBayer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RawBayer16);
        decoder->RawBayer16 = NULL;
    }
    if(decoder->StereoBuffer)
    {
        MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
        decoder->StereoBuffer = NULL;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        MEMORY_ALIGNED_FREE(decoder->RawCube);
        decoder->RawCube = NULL;
    }
    if(decoder->Curve2Linear)
    {
        MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
        decoder->Curve2Linear = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->Linear2CurveRed)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }
    //3d LUT
    {
        if(decoder->LUTcache)
            MEMORY_FREE(decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }
#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh); // fixed: was the undeclared name `mesh`
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;
        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;    // fixed: was lastLlensGoPro
        decoder->lastLensSphere = 0;   // fixed: was lastLlensSphere
        decoder->lastLensFill = 0;     // fixed: was lastLlensFill
        decoder->lastLensStyleSel = 0; // fixed: was lastLlensStyleSel
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
    }
#endif
    if(decoder->overrideData)
    {
        MEMORY_FREE(decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }
    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            MEMORY_FREE(decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#endif
#ifdef SPI_LOADER
    SPIReleaseAll(decoder);
    //KeyframesReleaseAll(decoder);
#endif
    decoder->initialized = 0;// cleared
}
// Shut down the decoder and release everything it allocated internally.
// The logfile (if any) belongs to the caller, who decides when to close it.
void ExitDecoder(DECODER *decoder)
{
	ClearDecoder(decoder);
}
// Ensure that a zero-initialized TRANSFORM structure exists for every
// possible channel, allocating any that are still missing.  On allocation
// failure the decoder error code is set and the routine returns early.
void AllocDecoderGroup(DECODER *decoder)
{
#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif
	int channel;

	assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004

	for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++) //DAN07022004
	{
		TRANSFORM *transform = decoder->transform[channel];

		// Skip channels that already have a transform data structure
		if (transform != NULL)
			continue;

#if _ALLOCATOR
		transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
		transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
		assert(transform != NULL);
		if (transform == NULL) {
			decoder->error = CODEC_ERROR_TRANSFORM_MEMORY;
			return;
		}

		memset(transform, 0, sizeof(TRANSFORM));
		decoder->transform[channel] = transform;
#if _TIMING
		alloc_transform_count++;
#endif
	}
}
// Allocate (or reuse) the scratch buffer that holds intermediate results
// during decoding.  The size is derived from the frame width, the output
// format, and the processor count; the per-thread buffers used by the
// debayer/color-formatting threads are (re)allocated here as well.
// Returns false only if an allocation fails.
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
int cpus;
size_t size;
size_t row_size;
char *buffer;
#if 0
// Allocate a buffer large enough for six rows of cache lines
size = width * sizeof(PIXEL);
size = ALIGN(size, _CACHE_LINE_SIZE);
size = 2 * TRANSFORM_MAX_CHANNELS * size;
#else
// Allocate a buffer large enough for nine rows of cache lines
size = width * sizeof(PIXEL) * 4;
size = ALIGN(size, _CACHE_LINE_SIZE);
size = 3 * TRANSFORM_MAX_CHANNELS * size;
#endif
// Add format-specific space on top of the base allocation
switch (format)
{
case DECODED_FORMAT_V210:
case DECODED_FORMAT_YU64:
// Increase the buffer size for decoding to the V210 format
row_size = 4 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 4 * 2 * row_size;
break;
case DECODED_FORMAT_YR16:
case DECODED_FORMAT_CbYCrY_10bit_2_8:
case DECODED_FORMAT_CbYCrY_16bit_2_14:
case DECODED_FORMAT_CbYCrY_16bit_10_6:
// Increase the buffer size for decoding to the YUV16 format
row_size = 4 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 8 * 2 * row_size;
break;
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_WP13:
// Increase the buffer size for decoding to the RG48/WP13 formats
row_size = 6 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 12 * 2 * row_size;
break;
case DECODED_FORMAT_RG64:
// Increase the buffer size for decoding to the RG64 format
row_size = 8 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 16 * 2 * row_size;
break;
case DECODED_FORMAT_BYR3:
// Increase the buffer size for decoding to the BYR3 format
row_size = 2 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 4 * 2 * row_size;
break;
case DECODED_FORMAT_BYR4:
// Increase the buffer size for decoding to the BYR4 format
row_size = 2 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 4 * 2 * row_size;
break;
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_W13A:
// Increase the buffer size for decoding to the B64A format
row_size = 8 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 16 * 2 * row_size;
break;
default:
// Increase the buffer size for YUV to RGB conversion
row_size = 3 * width * sizeof(PIXEL);
row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
size += 2 * 2 * row_size;
break;
}
// The upper 16 bits of the capabilities word hold the processor count
cpus = decoder->thread_cntrl.capabilities >> 16;
// Scale the buffer up for highly parallel decodes
if(cpus > 4)
size *= 4;
if(cpus > 16) //DAN20120803 -- 4444 clips
size *= 2;
// Has a buffer already been allocated?
if (decoder->buffer != NULL)
{
// Is the buffer large enough?
if (decoder->buffer_size < size)
{
// Free the previous buffer
#if _ALLOCATOR
FreeAligned(decoder->allocator, decoder->buffer);
#else
MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
decoder->buffer = NULL;
decoder->buffer_size = 0;
}
else
{
// The existing buffer is large enough; keep it
return true;
}
}
buffer = decoder->buffer;
if(buffer == NULL)
{
// Allocate the decoding buffer
#if _ALLOCATOR
buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
if(buffer == NULL)
{
return false;
}
}
#if DEBUG_BUFFER_USAGE
memset(buffer, 1, size);
#endif
// Save the buffer and its size in the decoder
decoder->buffer = buffer;
decoder->buffer_size = size;
// Initialize the scratch space descriptor
InitScratchBuffer(&decoder->scratch, buffer, size);
// Allocate a buffer for each debayer/color formatting thread
{
int i;
size = (width+16)*3*2*4*2*4;// sixteen lines
if(height*4 > width*3) //square or tall images where running out of scratch space for zooms.
size *= 1 + ((height+(width/2))/width);
// If the existing per-thread buffers are too small, free them all
// so that they are reallocated at the new size below
if (decoder->threads_buffer_size < size)
{
for(i=0;i<_MAX_CPUS;i++)
{
if(decoder->threads_buffer[i])
{
#if _ALLOCATOR
FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
decoder->threads_buffer[i] = NULL;
}
}
decoder->threads_buffer_size = 0;
}
// Allocate one buffer per processor
for(i=0;i<cpus;i++)
{
if(decoder->threads_buffer[i] == NULL)
{
#if _ALLOCATOR
decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
if(decoder->threads_buffer[i] == NULL)
{
return false;
}
}
}
decoder->threads_buffer_size = size;
}
// Eventually the scratch space descriptor will replace the buffer and buffer_size fields
return true;
}
// Resize the intermediate decoding buffer for new frame dimensions by
// delegating to AllocDecoderBuffer, which reallocates only when the
// existing buffer is too small.
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
	// The dimensions must be positive
	assert(width > 0);
	assert(height > 0);

	return AllocDecoderBuffer(decoder, width, height, format);
}
// Reset the per-band valid/started flags on every wavelet in every channel
// so the existing transforms can receive a freshly decoded sample.
void ClearTransformFlags(DECODER *decoder)
{
	int channel;

	for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
	{
		TRANSFORM *transform = decoder->transform[channel];
		int index;

		// Channels are filled in order, so a missing entry ends the scan
		if (transform == NULL) break;

		for (index = 0; index < TRANSFORM_MAX_WAVELETS; index++)
		{
			IMAGE *wavelet = transform->wavelet[index];
			if (wavelet == NULL) continue;

			wavelet->band_valid_flags = 0;
			wavelet->band_started_flags = 0;
		}
	}
}
// Initialize the tables that map each encoded subband to its wavelet index
// and to the band index within that wavelet.
//
// The destination tables are fixed-size arrays inside the decoder, so the
// copy length is clamped to their capacity; previously an oversized (or
// negative) num_subbands could overrun the tables.
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[], int num_subbands)
{
	size_t subband_table_size = num_subbands * sizeof(int);
	size_t table_capacity = sizeof(decoder->subband_wavelet_index);

	// Clamp the copy length to the capacity of the smaller destination table
	if (sizeof(decoder->subband_band_index) < table_capacity) {
		table_capacity = sizeof(decoder->subband_band_index);
	}
	if (subband_table_size > table_capacity) {
		subband_table_size = table_capacity;
	}

	// Zero both tables so unused entries are well defined, then copy the new values
	memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
	memcpy(decoder->subband_wavelet_index, subband_wavelet_index, subband_table_size);

	memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
	memcpy(decoder->subband_band_index, subband_band_index, subband_table_size);
}
#if 0
// Return true if the decoder can produce the specified output format.
// (Disabled code, retained for reference.)
static bool IsValidFormat(int format)
{
	// Converted from a chain of if statements per the original TODO
	switch (format)
	{
	case COLOR_FORMAT_BYR5:		// Can decode to BYR5
	case COLOR_FORMAT_BYR4:		// Can decode to BYR4
	case COLOR_FORMAT_BYR3:		// Can decode to BYR3
	case COLOR_FORMAT_BYR2:		// Can decode to BYR2
	case COLOR_FORMAT_RG48:		// Can decode to RGB48
	case COLOR_FORMAT_RG64:		// Can decode to RGBA64
	case COLOR_FORMAT_B64A:		// Can decode to B64A
		return true;

	default:
		// Accept any format within the range of known decoded formats
		return (COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT);
	}
}
#endif
#if _INTERLACED_WORKER_THREADS
// Create the Win32 synchronization objects and worker threads used for
// interlaced decoding.  Idempotent: does nothing if the worker state has
// already been initialized (lock_init != 0).
void StartInterlaceWorkerThreads(DECODER *decoder)
{
int i;
if(decoder->interlaced_worker.lock_init == 0)
{
// Create events for starting the worker threads (auto-reset, initially unsignaled)
for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
{
decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
}
// Create a semaphore to signal the worker threads to process rows
decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
// Create an event for each worker thread to signal that it has finished
for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
{
decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
}
// Create an event for forcing the worker threads to terminate
// (manual-reset, so one signal releases all waiting threads)
decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);
// Zero the count of worker threads that are active
decoder->interlaced_worker.thread_count = 0;
// Initialize the lock for controlling access to the worker thread data
InitializeCriticalSection(&decoder->interlaced_worker.lock);
decoder->interlaced_worker.lock_init = 1;
// Launch the worker threads; each runs InterlacedWorkerThreadProc
for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
{
decoder->interlaced_worker.id[i] = 0;
decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
assert(decoder->interlaced_worker.handle[i] != NULL);
}
}
}
#endif
#if 0
// Deliberately perform a runtime integer division so that calling
// TestException(0) raises a divide-by-zero fault, exercising the
// exception handler.  The volatile qualifiers prevent the compiler
// from folding the division at compile time.  (Disabled code.)
int TestException(int x)
{
static volatile int y1 = 100;
volatile int x1 = x;
return y1 / x1;
}
#endif
// Process device driver request to initialize the decoder
//
// Sets up the codesets, codebooks, finite state machines, worker threads,
// transform data structures, and scratch buffer for the requested frame
// dimensions, output format, and decoded resolution.  Returns false if the
// codebooks or the decoding buffer could not be initialized.
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#endif
{
CODESET codesets[CODEC_NUM_CODESETS];
int i;
int cpus;
//int x = 0;
// Copy the compiled-in codesets into a local, mutable array
#if CODEC_NUM_CODESETS == 3
memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif
#ifdef _WINDOWS
// Set the handler for system exceptions
SetDefaultExceptionHandler();
#endif
//TestException(x);
// Clear all decoder fields except the logfile and set the codebooks for decoding
InitDecoder(decoder, logfile, &codesets[0]);
#if _ALLOCATOR
decoder->allocator = allocator;
#endif
if(decoder->thread_cntrl.capabilities == 0)
{
// Determine the processor capabilities
SetDecoderCapabilities(decoder);
}
// The upper 16 bits of the capabilities word hold the processor count
cpus = decoder->thread_cntrl.capabilities >> 16;
assert(cpus > 0 && cpus <= _MAX_CPUS);
// Decode to half resolution?
if (resolution == DECODED_RESOLUTION_HALF)
{
// Reduce the frame size by half in each dimension
width = width/2;
height = height/2;
}
else if (resolution == DECODED_RESOLUTION_QUARTER)
{
// Reduce the frame size by one fourth in each dimension
width = width/4;
height = height/4;
}
// Initialize the codebooks
#if _ALLOCATOR
if (!InitCodebooks(decoder->allocator, codesets)) {
//decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
// The subroutine has already set the error code
return false;
}
#else
if (!InitCodebooks(codesets)) {
//decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
// The subroutine has already set the error code
return false;
}
#endif
// Initialize the FSM
InitDecoderFSM(decoder, &codesets[0]);
// Check the frame dimensions and format
//assert(width > 0);
//assert(height > 0);
// assert(IsValidFormat(format));
#if _THREADED_DECODER
// Create a semaphore to signal the transform thread to begin processing
// Initialize the transform queue
decoder->transform_queue.started = 0;
decoder->transform_queue.num_entries = 0;
decoder->transform_queue.next_entry = 0;
decoder->transform_queue.free_entry = 0;
memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif
#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
StartInterlaceWorkerThreads(decoder);
#endif
#if _THREADED
#if !_DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
if(cpus > 1)
{
int threads = cpus;
// Use at most four entropy worker threads
if(threads > 4)
threads = 4;
CreateLock(&decoder->entropy_worker_new.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->entropy_worker_new.pool,
threads,
EntropyWorkerThreadProc,
decoder);
}
// Initialize the lock that controls access to the generic worker thread data
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of generic worker threads (one per processor)
ThreadPoolCreate(&decoder->worker_thread.pool,
cpus,
WorkerThreadProc,
decoder);
#endif
#endif
// Set the frame dimensions and format
SetDecoderFormat(decoder, width, height, format, resolution);
// Allocate the data structure for decoding the samples
AllocDecoderGroup(decoder);
// Note that this code assumes that the samples to decode are groups
// as opposed to isolated frames which are not supported in this code
// Allocate a buffer for storing intermediate results during decoding
if (!AllocDecoderBuffer(decoder, width, height, format)) {
return false;
}
// Should check that the finite state machine tables were initialized
assert(decoder->fsm[0].table.flags < 0);
// Initialize the finite state machine for this decoder
for(i=0; i<CODEC_NUM_CODESETS; i++)
{
InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
// Scale the values in the finite state machine entries for companding
ScaleFSM(&decoder->fsm[i].table);
#endif
}
// Indicate that the decoder has been initialized
decoder->state = DECODER_STATE_INITIALIZED;
#if (1 && DUMP)
// Write the wavelet bands as images
SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
// SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);
// Set this flag to enable output
decoder->dump.enabled = true;
#endif
#if _TIMING
// Initialize the global timers and counters
InitTiming();
#endif
//DAN20160203 Fix for a memory leak in InitCodebooks
// Free the codebook tables that InitCodebooks allocated into the local
// codesets, since they are no longer needed after FSM initialization
for (i = 0; i < CODEC_NUM_CODESETS; i++)
{
#if _ALLOCATOR
Free(allocator, codesets[i].codebook_runbook); codesets[i].codebook_runbook = NULL;
Free(allocator, codesets[i].fastbook); codesets[i].fastbook = NULL;
Free(allocator, codesets[i].valuebook); codesets[i].valuebook = NULL;
#else
MEMORY_FREE(codesets[i].codebook_runbook); codesets[i].codebook_runbook = NULL;
MEMORY_FREE(codesets[i].fastbook); codesets[i].fastbook = NULL;
MEMORY_FREE(codesets[i].valuebook); codesets[i].valuebook = NULL;
#endif
}
// The decoder has been initialized successfully
return true;
}
// Start the entropy worker threads if they are needed and not already
// running.  Used when thread startup is delayed until the first decode
// (_DELAY_THREAD_START); also applies any user-specified CPU limit.
void DecodeEntropyInit(DECODER *decoder)
{
int cpus = 1;
if(decoder->thread_cntrl.capabilities == 0)
{
// Determine the processor capabilities
SetDecoderCapabilities(decoder);
}
// The upper 16 bits of the capabilities word hold the processor count
cpus = decoder->thread_cntrl.capabilities >> 16;
// Apply the user-specified limit on the number of processors (if any)
if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
{
cpus = decoder->cfhddata.cpu_limit;
decoder->thread_cntrl.limit = cpus;
decoder->thread_cntrl.set_thread_params = 1;
// Replace the processor count in the upper 16 bits of the capabilities word
decoder->thread_cntrl.capabilities &= 0xffff;
decoder->thread_cntrl.capabilities |= cpus<<16;
}
assert(cpus > 0 && cpus <= _MAX_CPUS);
#if _THREADED
#if _DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
{
int threads = cpus;
// Use at most four entropy worker threads
if(threads > 4)
threads = 4;
CreateLock(&decoder->entropy_worker_new.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->entropy_worker_new.pool,
threads,
EntropyWorkerThreadProc,
decoder);
}
#endif
#endif
}
// Replace the decoder's metadata override block with a private copy of the
// supplied data.  Passing overrideSize == 0 clears the override and also
// releases the override-priority metadata databases.  Always returns true;
// if the copy cannot be allocated the override is simply left empty.
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
// Release any previous override data
if(decoder->overrideData)
{
#if _ALLOCATOR
Free(decoder->allocator, decoder->overrideData);
#else
MEMORY_FREE(decoder->overrideData);
#endif
decoder->overrideData = NULL;
decoder->overrideSize = 0;
}
if(overrideSize)
{
#if _ALLOCATOR
decoder->overrideData = Alloc(decoder->allocator, overrideSize);
#else
decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
if(decoder->overrideData)
{
// Keep a private copy of the caller's override data
memcpy(decoder->overrideData, overrideData, overrideSize);
decoder->overrideSize = overrideSize;
}
}
else
{
int i;
for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++) //This was 0 to max but that caused right eye primary corrections (side-by-side) mode to flicker.
// This database clearing was added but I don't know why.
{
if(decoder->DataBases[i])
{
#if _ALLOCATOR
Free(decoder->allocator, decoder->DataBases[i]);
#else
MEMORY_FREE(decoder->DataBases[i]);
#endif
decoder->DataBases[i] = NULL;
decoder->DataBasesSize[i] = 0;
decoder->DataBasesAllocSize[i] = 0;
}
}
}
return true;
}
// Return the transform data structure for the specified channel of the
// group, allocating and zeroing it on first use.  Returns NULL if the
// channel number is out of range or the allocation fails.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
//TODO:ALLOC Change this routine to take an allocator as the first argument
// NOTE(review): allocator is NULL here, so Alloc must tolerate a NULL
// allocator (presumably falling back to a default) -- confirm
ALLOCATOR *allocator = NULL;
#endif
TRANSFORM *transform;
// Channel zero is a special case because it may mean
// that the group header has not been decoded yet
if (channel != 0)
{
// Make sure that the channel number is in range
assert(0 <= channel && channel < group->header.num_channels);
if (!(0 <= channel && channel < group->header.num_channels))
return NULL;
}
transform = group->transform[channel];
// Need to allocate a transform data structure?
if (transform == NULL) {
#if _ALLOCATOR
transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
assert(transform != NULL);
if (transform == NULL) return NULL;
memset(transform, 0, sizeof(TRANSFORM));
group->transform[channel] = transform;
#if _TIMING
alloc_transform_count++;
#endif
}
return transform;
}
//extern FILE *logfile;
// Fill the output buffer with the "black" value appropriate for the pixel
// format.  For packed formats such as YUYV the fill pattern is four bytes
// long; memset replicates only its low byte, so the previous call
// memset(buffer, output.word, size) silently dropped the chroma bytes.
// The pattern is now written explicitly when its bytes differ.
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format)
{
	// Total number of bytes to clear (pitch covers one full row)
	size_t size = (size_t)height * pitch;
	union {
		uint8_t byte[4];
		uint32_t word;
	} output;

	switch (format)
	{
	case DECODED_FORMAT_YUYV:
		// Black luma with neutral chroma, packed as Y Cb Y Cr
		output.byte[0] = COLOR_LUMA_BLACK;
		output.byte[1] = COLOR_CHROMA_ZERO;
		output.byte[2] = COLOR_LUMA_BLACK;
		output.byte[3] = COLOR_CHROMA_ZERO;
		break;

	default:
		//if (logfile) fprintf(logfile,"**Unknown format: %d\n", format);
		//assert(0);
		// Unknown formats are cleared to zero
		output.word = 0;
		break;
	}

	if (output.byte[0] == output.byte[1] &&
		output.byte[0] == output.byte[2] &&
		output.byte[0] == output.byte[3])
	{
		// All four pattern bytes are identical, so a plain memset suffices
		memset(buffer, output.byte[0], size);
	}
	else
	{
		// Replicate the four-byte pattern across the buffer
		size_t i;
		for (i = 0; i < size; i++) {
			buffer[i] = output.byte[i & 3];
		}
	}
}
// Decode the coefficients in a subband
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband);
// Decode the coefficients in a lowpass band
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet);
// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading);
// Decode an empty band
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band);
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
// Decode a sample channel header
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input);
// Apply the inverse horizontal-temporal transform to reconstruct the output frame
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
#if 0
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
uint8_t *frame1, uint8_t *frame2, int output_pitch,
FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision);
#endif
// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// Return true if the rest of the channel does not have to be decoded
//
// The fieldplus and spatial transform cases previously duplicated an entire
// switch statement that differed only in the two subband masks; the masks
// are now selected up front and the switch appears once.  The unreachable
// break statements after returns were removed.
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int transform_type = transform->type;
	uint32_t half_mask;
	uint32_t quarter_mask;

	// Select the subband masks for the transform type
	if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
	{
		half_mask = DECODED_SUBBAND_MASK_HALF;
		quarter_mask = DECODED_SUBBAND_MASK_QUARTER;
	}
	else
	{
		assert(transform_type == TRANSFORM_TYPE_SPATIAL);
		half_mask = 0x7F;
		quarter_mask = 0x0F;
	}

	// Can the rest of the channel be skipped?
	switch (resolution)
	{
	case DECODED_RESOLUTION_HALF:
		if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			return ((codec->decoded_subband_flags & half_mask) == half_mask);
		break;

	case DECODED_RESOLUTION_QUARTER:
		if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			return ((codec->decoded_subband_flags & quarter_mask) == quarter_mask);
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		// Only the lowpass band (subband zero) is needed
		return (codec->decoded_subband_flags & 1);

	default:
		// A YUV 4:2:2 decode does not need the fourth channel
		if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER &&
			(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY) &&
			codec->channel == 3)
		{
			return true;
		}
		break;
	}

	// Cannot skip the rest of the channel
	return false;
}
#if 0
// Return true if the specified subband is not needed at the decoded
// resolution and can therefore be skipped.  (Disabled code.)
static bool CanSkipSubband(DECODER *decoder, int subband)
{
// Bitmask indicates which subbands must be decoded for quarter resolution
static uint32_t quarter_resolution_mask = 0x008F;
// Convert the subband number into a bitmask (could use a lookup table)
uint32_t subband_mask = SUBBAND_MASK(subband);
// Select the resolution of the fully decoded frames
int resolution = decoder->frame.resolution;
switch (resolution)
{
case DECODED_RESOLUTION_QUARTER:
//if (4 <= subband && subband <= 6)
if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
{
// Skip any subband that is not in the quarter resolution mask
if ((subband_mask & quarter_resolution_mask) == 0) {
return true;
}
}
break;
default:
// Assume that the subband must be decoded
break;
}
return false;
}
#endif
// Return true if the wavelet exists and every one of its bands is valid
static bool AllBandsValid(IMAGE *wavelet)
{
	if (wavelet == NULL) {
		return false;
	}
	return BANDS_ALL_VALID(wavelet);
}
#if DEBUG
// Return true when the indexed wavelet has all bands valid in every channel
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	// Validate the channel count and frame index
	if (!(1 <= num_channels && num_channels <= TRANSFORM_MAX_CHANNELS)) {
		assert(0);
		return false;
	}
	if (!(0 <= frame_index && frame_index < TRANSFORM_MAX_FRAMES)) {
		assert(0);
		return false;
	}

	for (channel = 0; channel < num_channels; channel++)
	{
		if (!AllBandsValid(transform_array[channel]->wavelet[frame_index])) {
			return false;
		}
	}

	// All wavelet bands in all channels are valid
	return true;
}
// Return true when the lowpass band of the indexed wavelet is valid in
// every channel
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	// Validate the channel count and frame index
	if (!(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS)) {
		return false;
	}
	if (!(0 <= frame_index && frame_index < TRANSFORM_MAX_FRAMES)) {
		return false;
	}

	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform_array[channel]->wavelet[frame_index];
		bool lowpass_valid = (wavelet != NULL) &&
			(wavelet->band_valid_flags & BAND_VALID_MASK(0));
		if (!lowpass_valid) {
			return false;
		}
	}

	// All lowpass bands in all channels are valid
	return true;
}
#endif
// Estimate the full frame dimensions from the dimensions of the smallest
// (first) wavelet.  Both recognized transform types expand each dimension
// by the same factor, so the two cases share one computation (they were
// previously duplicated verbatim).  Returns false, leaving the outputs
// untouched, for an unknown transform type.
static bool
ComputeFrameDimensionsFromFirstWavelet(int transform_type,
									   int first_wavelet_width,
									   int first_wavelet_height,
									   int *frame_width_out,
									   int *frame_height_out)
{
	// The first wavelet is one eighth of the frame size in each dimension
	const int expansion = 8;

	switch (transform_type)
	{
	case TRANSFORM_TYPE_SPATIAL:
	case TRANSFORM_TYPE_FIELDPLUS:
		// Return the frame dimensions
		*frame_width_out = first_wavelet_width * expansion;
		*frame_height_out = first_wavelet_height * expansion;
		return true;

	default:
		// Unknown transform type
		assert(0);
		return false;
	}
}
// Decode the sample header to determine the type of sample and other parameters
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
TAGVALUE segment;
int sample_type;
int sample_size = 0;
// Group index
uint32_t channel_size[TRANSFORM_MAX_CHANNELS];
// Number of channels in the group index
int channel_count;
// Values used for computing the frame width and height (if necessary)
int transform_type = -1;
int first_wavelet_width = 0;
int first_wavelet_height = 0;
int display_height = 0;
int current_channel = 0;
int currentVideoChannel = header->videoChannels;
int find_lowpass_bands = header->find_lowpass_bands & 1;
int find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0;
int find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0;
if (header == NULL) {
return false;
}
if(currentVideoChannel == 0)
currentVideoChannel = 1;
// Clear the entire sample header to prevent early return from this routine
memset(header, 0, sizeof(SAMPLE_HEADER));
// Clear the error code
header->error = CODEC_ERROR_OKAY;
// Initialize the frame dimensions to unknown
header->width = 0;
header->height = 0;
header->videoChannels = 1;
// Initialize the original pixel format to unknown
header->input_format = COLOR_FORMAT_UNKNOWN;
// Initialize the encoded format to unknown
header->encoded_format = ENCODED_FORMAT_UNKNOWN;
// Clear the frame number in case it is not present in the sample
header->frame_number = 0;
// The video is not progressive if the sample flags are not present
header->hdr_progressive = false;
#if _BITSTREAM_UNALIGNED
// Record the alignment of the bitstream within the sample
SetBitstreamAlignment(input, 0);
#endif
sample_size = input->nWordsUsed;
// Get the type of sample (should be the first tag value pair)
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
header->error = CodecErrorBitstream(input);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames
header->key_frame = true;
header->difference_frame = false;
header->droppable_frame = false;
break;
case SAMPLE_TYPE_FRAME: // The second or later frame in a group
header->key_frame = false;
header->difference_frame = true;
header->droppable_frame = true;
break;
case SAMPLE_TYPE_IFRAME: // One frame in the group
header->key_frame = true;
header->difference_frame = false;
header->droppable_frame = true;
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// Treat the video sequence header like a keyframe that can be dropped
header->key_frame = true;
header->difference_frame = false;
header->droppable_frame = true;
break;
default:
// Unknown type of sample
header->error = CODEC_ERROR_SAMPLE_TYPE;
return false;
break;
}
// Continue parsing the sample header until all of the information has been found
while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
(find_uncompressed == 1 && current_channel < 1) ||
display_height == 0 ||
header->width == 0 ||
header->height == 0 ||
header->input_format == COLOR_FORMAT_UNKNOWN ||
header->frame_number == 0 ||
(header->interlaced_flags == 0 && header->hdr_progressive == 0))
{
int chunksize = 0;
// Get the next tag value pair from the bitstream
segment = GetSegment(input);
// Did the bitstream end before the last tag was found?
if (input->error == BITSTREAM_ERROR_UNDERFLOW) {
break;
}
// Did an error occur while reading the bitstream?
if (input->error != BITSTREAM_ERROR_OKAY) {
header->error = CodecErrorBitstream(input);
return false;
}
// Is this an optional tag?
if (segment.tuple.tag < 0) {
segment.tuple.tag = NEG(segment.tuple.tag);
}
if(segment.tuple.tag & 0x2000)
{
chunksize = segment.tuple.value;
chunksize &= 0xffff;
chunksize += ((segment.tuple.tag&0xff)<<16);
}
else if(segment.tuple.tag & 0x4000)
{
chunksize = segment.tuple.value;
chunksize &= 0xffff;
}
// else if(tag == CODEC_TAG_INDEX) // handled below
// {
// chunksize = value;
// chunksize &= 0xffff;
// }
else
{
chunksize = 0;
}
if((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
{
int skip = 1;
if((segment.tuple.tag & 0xff00) == 0x2200) //sample size
{
if(sample_size < chunksize*4)
find_header_info_only = 1;
skip = find_header_info_only;
if(currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
{
BITSTREAM input2;
SAMPLE_HEADER header2;
BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4);
int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx
int eye_sample_size = input->nWordsUsed - eye_offset;
// Search for first sample of the next frame
while((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
{
eye2 += 4;
chunksize ++;
eye_offset += 4;
eye_sample_size -= 4;
}
// Save the offset to the right stereo sample
header->left_sample_size = eye_offset;
{
InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);
memset(&header2, 0, sizeof(SAMPLE_HEADER));
header2.find_lowpass_bands = 1;
currentVideoChannel++;
header2.videoChannels = currentVideoChannel;
if(ParseSampleHeader(&input2, &header2))
{
int i;
for(i=0;i<4;i++)
{
if(header2.thumbnail_channel_offsets[i])
header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
}
}
}
}
}
if((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size
{
header->hdr_uncompressed = 1;
skip = 1;
if(find_lowpass_bands != 1)
break;
}
if((segment.tuple.tag & 0xff00) == 0x2100) //level
{
if(find_lowpass_bands == 1)
{
skip = 0;
}
else
{
skip = 1; // no header data after the fix level
break;
}
}
if(chunksize)
{
if(skip)
{
input->lpCurrentWord += chunksize*4;
input->nWordsUsed -= chunksize*4;
}
}
else
{
switch (segment.tuple.tag)
{
case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP.
header->encoder_version = (((segment.tuple.value>>12) & 0xf)<<16) |
(((segment.tuple.value>>8) & 0xf)<<8) |
((segment.tuple.value) & 0xff);
break;
case CODEC_TAG_INDEX:
// Get the number of channels in the index to skip
channel_count = segment.tuple.value;
DecodeGroupIndex(input, (uint32_t *)&channel_size[0], channel_count);
break;
case CODEC_TAG_FRAME_WIDTH:
// Record the frame width in the sample header
header->width = segment.tuple.value;
break;
case CODEC_TAG_FRAME_HEIGHT:
// Record the frame height in the sample header
header->height = segment.tuple.value;
break;
case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
display_height = segment.tuple.value;
break;
case CODEC_TAG_LOWPASS_WIDTH:
// Save the width of the smallest wavelet for computing the frame dimensions
first_wavelet_width = segment.tuple.value;
break;
case CODEC_TAG_LOWPASS_HEIGHT:
// Save the height of the smallest wavelet for computing the frame dimensions
first_wavelet_height = segment.tuple.value;
break;
case CODEC_TAG_TRANSFORM_TYPE:
// Save the type of transform for computing the frame dimensions (if necessary)
transform_type = segment.tuple.value;
break;
case CODEC_TAG_INPUT_FORMAT:
// Record the original format of the encoded frames
header->input_format = (COLOR_FORMAT)segment.tuple.value;
break;
case CODEC_TAG_ENCODED_FORMAT:
case CODEC_TAG_OLD_ENCODED_FORMAT:
// Record the encoded format (internal representation)
header->encoded_format = (ENCODED_FORMAT)segment.tuple.value;
if(header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
header->encoded_format = ENCODED_FORMAT_RGB_444;
break;
case CODEC_TAG_FRAME_NUMBER:
// Record the frame number for debugging
header->frame_number = segment.tuple.value;
break;
case CODEC_TAG_INTERLACED_FLAGS:
// Record the flags that indicate the field type
header->interlaced_flags = segment.tuple.value;
break;
case CODEC_TAG_SAMPLE_FLAGS:
// The sample flags specify progressive versus interlaced decoding
header->hdr_progressive = !!(segment.tuple.value & SAMPLE_FLAGS_PROGRESSIVE);
if (header->hdr_progressive) {
// Clear the interlaced flags
header->interlaced_flags = 0;
}
break;
case CODEC_TAG_LOWPASS_SUBBAND:
if(segment.tuple.value == 0) // low pass band
{
int count = 8;
uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
do
{
uint32_t longword = SwapInt32(lptr[count]);
unsigned short t,v;
t = (longword>>16) & 0xffff;
v = (longword) & 0xffff;
if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
{
header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4;
break;
}
count++;
} while(count < 32);
current_channel++;
}
break;
case CODEC_TAG_ENCODED_CHANNELS:
if(header->videoChannels == 1)
{
header->videoChannels = segment.tuple.value;
if(header->videoChannels < 1)
header->videoChannels = 1;
}
break;
case CODEC_TAG_QUALITY_L: //
header->encode_quality &= 0xffff0000;
header->encode_quality |= segment.tuple.value;
break;
case CODEC_TAG_QUALITY_H: //
header->encode_quality &= 0xffff;
header->encode_quality |= segment.tuple.value<<16;
break;
}
// Have the encoded frame dimensions been computed?
if (header->width == 0 || header->height == 0)
{
// Found the first wavelet in the bitstream?
if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
{
// The group header did not contain tags for the frame dimensions
// prior to the release of support for RGB 4:4:4, so must attempt to
// compute the frame dimensions from the dimensions of the lowpass band.
int frame_width = 0;
int frame_height = 0;
// Use the dimensions of the first wavelet to compute the frame width and height
if (!ComputeFrameDimensionsFromFirstWavelet(transform_type,
first_wavelet_width,
first_wavelet_height,
&frame_width,
&frame_height)) {
// Could not compute the frame dimensions
header->error = CODEC_ERROR_FRAME_DIMENSIONS;
return false;
}
// Save the frame dimensions in the sample header
header->width = frame_width;
header->height = frame_height;
// No more header information after finding the lowpass band
break;
}
}
if(find_lowpass_bands != 1 && find_uncompressed != 1)
{
// No more header information after the first encoded band
if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER)
{
// Stop looking for header information
break;
}
// No more header information after the frame index
if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX)
{
// Stop looking for header information
break;
}
// No more header information after the lowpass band header
if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH)
{
// Stop looking for header information
break;
}
}
}
}
}
if (header->width == 0 || header->height == 0) {
assert(0);
}
// Fill in the encoded format if it was not present in the header
if (header->encoded_format == ENCODED_FORMAT_UNKNOWN) {
header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
}
if (display_height > 0) {
header->height = display_height;
}
if (header->encoded_format == ENCODED_FORMAT_BAYER)
{
header->width *= 2;
header->height *= 2;
if(display_height == 0)
{
if(header->height == 1088)
header->height = 1080;
}
}
// Return true if the header was parsed completely and correctly
return (header->width > 0 &&
header->height > 0 &&
((sample_type == SAMPLE_TYPE_FRAME) ||
(header->input_format != COLOR_FORMAT_UNKNOWN &&
header->encoded_format != ENCODED_FORMAT_UNKNOWN)));
// It is not an error if the frame number was not found in the sample header
}
// Print the tag/value pairs of a sample header to the log file, stopping as
// soon as the lowpass band dimensions have been seen.  Returns false if a
// bitstream error occurs, true otherwise.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
	int lowpass_width = 0;
	int lowpass_height = 0;

	// Walk the header until the lowpass band dimensions have been reported
	while (lowpass_width == 0 && lowpass_height == 0)
	{
		TAGVALUE segment = GetSegment(input);
		TAGWORD tag;
		TAGWORD value;

		// Stop immediately if the bitstream could not be read
		if (input->error != BITSTREAM_ERROR_OKAY) {
			return false;
		}

		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Optional tags are stored negated
		if (tag < 0) {
			tag = NEG(tag);
		}

		// Only the non-sized codec tags are expected here
		assert(CODEC_TAG_ZERO < tag && tag <= CODEC_TAG_LAST_NON_SIZED);

		switch (tag)
		{
		case CODEC_TAG_SAMPLE:
			fprintf(logfile, "Sample type: %d\n", value);
			break;

		case CODEC_TAG_FRAME_WIDTH:
			fprintf(logfile, "Frame width: %d\n", value);
			break;

		case CODEC_TAG_FRAME_HEIGHT:
			fprintf(logfile, "Frame height: %d\n", value);
			break;

		case CODEC_TAG_LOWPASS_WIDTH:
			// Remember the width so the loop terminates
			lowpass_width = value;
			fprintf(logfile, "Lowpass width: %d\n", lowpass_width);
			break;

		case CODEC_TAG_LOWPASS_HEIGHT:
			// Remember the height so the loop terminates
			lowpass_height = value;
			fprintf(logfile, "Lowpass height: %d\n", lowpass_height);
			break;

		case CODEC_TAG_TRANSFORM_TYPE:
			fprintf(logfile, "Transform type: %d\n", value);
			break;

		case CODEC_TAG_INPUT_FORMAT:
			fprintf(logfile, "Input format: %d\n", value);
			break;

		case CODEC_TAG_ENCODED_FORMAT:
		case CODEC_TAG_OLD_ENCODED_FORMAT:
			fprintf(logfile, "Encoded format: %d\n", value);
			break;

		case CODEC_TAG_FRAME_NUMBER:
			fprintf(logfile, "Frame number: %d\n", value);
			break;
		}
	}

	return true;
}
// Advance the bitstream past the current video channel's chunk so that decoding
// can continue at the requested channel of a stereo/3D sample.
// Returns the number of encoded channels found in the header; for samples too
// small to scan (P-frames), returns the previously known channel count.
// NOTE(review): assumes big-endian tag/value tuplets of 4 bytes each and that
// chunk sizes are expressed in 32-bit words -- consistent with the parsing in
// the rest of this file, but verify against the bitstream spec.
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work
{
	TAGWORD tag,value=1;
	unsigned char *pos = NULL;
	int readsize = input->nWordsUsed;

	// Only the first few tuplets carry the channel count
	if(readsize > 4096) // only need to scan the first few tuplets
	{
		readsize = 4096;
	}
	else
	{
		//Tiny therefore P-frame, nothing to be read so:
		value=decoder->real_channels; // return the last value.
		return value;
	}

	// Locate the encoded-channels tuplet; value receives the channel count
	pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);
	if(pos && value>1 && skip_to_channel>1)
	{
		int chunksize = 0;
		intptr_t offset;
		int count = 0;

		// Read big-endian tag/value pairs until the sample-size tag is found,
		// giving up after 10 tuplets
		do
		{
			tag = *pos++<<8;
			tag |= *pos++;
			value = *pos++<<8;
			value |= *pos++;
			if (tag < 0)
			{
				// Optional tags are stored negated
				tag = NEG(tag);
			}
		} while((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);

		if((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
		{
			// The chunk size is 24 bits: the low byte of the tag holds the
			// high bits, the 16-bit value holds the low bits
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);

			// Skip over this channel's chunk (sizes are in 32-bit words)
			offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4;
			input->lpCurrentWord += offset;
			input->nWordsUsed -= (int)offset;
			{
				// NOTE(review): this byte pointer shadows the outer TAGWORD 'tag'
				uint8_t *tag = (uint8_t *)input->lpCurrentWord;
				// Search for first sample of the next frame
				while((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
				{
					input->lpCurrentWord += 4;
					input->nWordsUsed -= 4;
					tag += 4;
				}
			}
		}
	}

	//if(value == 0) value = 1; // old non-stereo file
	return value;
}
// Number of subpixel phases used by the fixed vertical-shift filter below
#define SUBPIXEL 64

// 4-tap interpolation kernels, one row per subpixel phase (plus a final row so
// both endpoints are representable).  Tap weights are scaled by 128; each row
// sums to roughly 32768 (a unity-gain filter in Q15).  The endpoint rows use
// 0x7fff instead of 256*128 = 0x8000, presumably because 0x8000 does not fit
// in the signed 16-bit SSE multiplies that consume these weights -- TODO
// confirm against the SIMD paths that use this table.
static short gains[SUBPIXEL+1][4] = {
{0*128,0*128,0x7fff,0*128},
{0*128,2*128,0x7fff,-2*128},
{0*128,5*128,255*128,-4*128},
{0*128,8*128,254*128,-6*128},
{0*128,11*128,253*128,-8*128},
{0*128,14*128,252*128,-10*128},
{0*128,18*128,250*128,-12*128},
{0*128,21*128,248*128,-13*128},
{-1*128,25*128,247*128,-15*128},
{-1*128,29*128,244*128,-16*128},
{-1*128,33*128,241*128,-17*128},
{-2*128,37*128,239*128,-18*128},
{-2*128,41*128,236*128,-19*128},
{-3*128,46*128,233*128,-20*128},
{-3*128,50*128,229*128,-20*128},
{-4*128,55*128,226*128,-21*128},
{-4*128,60*128,221*128,-21*128},
{-5*128,65*128,217*128,-21*128},
{-5*128,70*128,213*128,-22*128},
{-6*128,75*128,209*128,-22*128},
{-7*128,80*128,205*128,-22*128},
{-7*128,85*128,199*128,-21*128},
{-8*128,91*128,194*128,-21*128},
{-9*128,96*128,190*128,-21*128},
{-10*128,102*128,185*128,-21*128},
{-10*128,107*128,179*128,-20*128},
{-11*128,113*128,174*128,-20*128},
{-12*128,118*128,169*128,-19*128},
{-13*128,124*128,164*128,-19*128},
{-14*128,129*128,159*128,-18*128},
{-14*128,135*128,152*128,-17*128},
{-15*128,141*128,147*128,-17*128},
{-16*128,144*128,144*128,-16*128},
{-17*128,147*128,141*128,-15*128},
{-17*128,152*128,135*128,-14*128},
{-18*128,159*128,129*128,-14*128},
{-19*128,164*128,124*128,-13*128},
{-19*128,169*128,118*128,-12*128},
{-20*128,174*128,113*128,-11*128},
{-20*128,179*128,107*128,-10*128},
{-21*128,185*128,102*128,-10*128},
{-21*128,190*128,96*128,-9*128},
{-21*128,194*128,91*128,-8*128},
{-21*128,199*128,85*128,-7*128},
{-22*128,205*128,80*128,-7*128},
{-22*128,209*128,75*128,-6*128},
{-22*128,213*128,70*128,-5*128},
{-21*128,217*128,65*128,-5*128},
{-21*128,221*128,60*128,-4*128},
{-21*128,226*128,55*128,-4*128},
{-20*128,229*128,50*128,-3*128},
{-20*128,233*128,46*128,-3*128},
{-19*128,236*128,41*128,-2*128},
{-18*128,239*128,37*128,-2*128},
{-17*128,241*128,33*128,-1*128},
{-16*128,244*128,29*128,-1*128},
{-15*128,247*128,25*128,-1*128},
{-13*128,248*128,21*128,0*128},
{-12*128,250*128,18*128,0*128},
{-10*128,252*128,14*128,0*128},
{-8*128,253*128,11*128,0*128},
{-6*128,254*128,8*128,0*128},
{-4*128,255*128,5*128,0*128},
{-2*128,0x7fff,2*128,0*128},
{0*128,0*128,0x7fff,0*128}
};
// Lanczos-style resampling kernel used by the vertical shift/zoom routines.
// 256 entries = 4 filter taps x 64 subpixel phases: a line position's
// fractional remainder r (0..63) indexes the table at r, r+64, r+128 and
// r+192 to fetch the four tap weights (roughly Q15 fixed point).
// The center value is clamped to 32767 (was 32768, an issue for signed SSE2).
// NOTE(review): the table is mirror-symmetric about index 128 except for the
// pair at indices 96/160 (18781 vs 18159) and the small steps around 18871 --
// verify against the kernel generator before relying on exact values there.
static int lanczos[256] =
{
	// Taps for the outermost (A) line: indices 0..63
	0,     -2,    -8,    -18,   -33,   -53,   -77,   -106,
	-141,  -179,  -223,  -272,  -325,  -384,  -447,  -514,
	-586,  -662,  -742,  -826,  -913,  -1004, -1097, -1193,
	-1290, -1389, -1490, -1591, -1692, -1792, -1892, -1990,
	-2086, -2179, -2269, -2355, -2436, -2511, -2580, -2643,
	-2697, -2744, -2781, -2809, -2826, -2832, -2826, -2808,
	-2776, -2730, -2670, -2594, -2503, -2395, -2271, -2129,
	-1969, -1790, -1593, -1377, -1141, -886,  -611,  -315,
	// Rising lobe (B line): indices 64..127
	0,     336,   692,   1069,  1466,  1884,  2321,  2778,
	3255,  3750,  4265,  4797,  5347,  5914,  6498,  7097,
	7711,  8340,  8982,  9636,  10301, 10977, 11663, 12357,
	13058, 13765, 14477, 15192, 15910, 16630, 17349, 18066,
	18781, 18871, 19580, 20285, 20986, 21678, 22361, 23035,
	23697, 24348, 24983, 25604, 26206, 26790, 27354, 27898,
	28419, 28915, 29387, 29832, 30249, 30638, 30997, 31326,
	31623, 31886, 32117, 32314, 32476, 32603, 32695, 32749,
	// Peak at index 128, then the falling lobe (C line): indices 128..191
	32767, // was 32768, issue for SSE2
	32749, 32695, 32603, 32476, 32314, 32117, 31886,
	31623, 31326, 30997, 30638, 30249, 29832, 29387, 28915,
	28419, 27898, 27354, 26790, 26206, 25604, 24983, 24348,
	23697, 23035, 22361, 21678, 20986, 20285, 19580, 18871,
	18159, 18066, 17349, 16630, 15910, 15192, 14477, 13765,
	13058, 12357, 11663, 10977, 10301, 9636,  8982,  8340,
	7711,  7097,  6498,  5914,  5347,  4797,  4265,  3750,
	3255,  2778,  2321,  1884,  1466,  1069,  692,   336,
	// Taps for the outermost (D) line: indices 192..255
	0,     -315,  -611,  -886,  -1141, -1377, -1593, -1790,
	-1969, -2129, -2271, -2395, -2503, -2594, -2670, -2730,
	-2776, -2808, -2826, -2832, -2826, -2809, -2781, -2744,
	-2697, -2643, -2580, -2511, -2436, -2355, -2269, -2179,
	-2086, -1990, -1892, -1792, -1692, -1591, -1490, -1389,
	-1290, -1193, -1097, -1004, -913,  -826,  -742,  -662,
	-586,  -514,  -447,  -384,  -325,  -272,  -223,  -179,
	-141,  -106,  -77,   -53,   -33,   -18,   -8,    -2,
};
// Shift an image vertically by 'offset' (a fraction of the frame height) and
// zoom it by 'zoom', filtering in place with the 4-tap lanczos[] kernel.
//   decoder     - supplies StereoBufferFormat, which selects the pixel layout
//   RGB48       - image buffer, filtered in place
//   buffer      - scratch buffer that receives a copy of the source region
//   widthbytes  - bytes per line to process
//   height      - number of lines
//   pitch       - bytes between lines in RGB48 (may be negative)
//   offset      - vertical shift as a fraction of the height
//   zoom        - vertical magnification factor
void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
							int widthbytes, int height, int pitch, float offset,
							float zoom)
{
	float yposf,ystepf;
	int x;
	unsigned short *scanline[4];
	int step;
	__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
	__m128i *lineA, *lineB, *lineC, *lineD, *outline128;

	offset = -offset;

	// First source line position, centering the zoom on the middle of the frame
	yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
	ystepf = 1.0f/zoom;		// source lines consumed per output line

	if(pitch < 0)
		yposf -= ystepf;

	// Bytes processed per SSE iteration for each pixel format
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_YUYV:
		step = 16;
		break;
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
	default:
		step = 32;
		break;
	}

	{
		// Substitute line of zeros for taps that fall outside the image.
		// NOTE(review): zeroline is only 1024 bytes while out-of-range taps are
		// read over 'widthbytes' bytes -- confirm callers keep lines within
		// this bound (or that out-of-range taps cannot occur for wider lines).
		static char zeroline[1024] = {0};
		int y;
		int yoffset = (int)(yposf - 2.0);
		int yend = (int)(yposf + 2.0 + ystepf*height);
		unsigned char *src = (unsigned char *)RGB48;
		unsigned char *dst = (unsigned char *)RGB48;
		unsigned char *ptr = (unsigned char *)buffer;

		if(yoffset < 0) yoffset = 0;
		if(yend > height) yend = height;

		// Copy the needed source region into 'buffer' since filtering is in place
		src += pitch * yoffset;
		for(y=yoffset; y<yend; y++)
		{
			memcpy(ptr, src, widthbytes);
			ptr += widthbytes;
			src += pitch;
		}

		ptr = (unsigned char *)buffer;

		for(y=0; y<height; y++)
		{
			int i,t,yp = ((int)yposf);
			int rmdr = 63-((int)(yposf*64.0) & 63);	// subpixel phase (64 steps per line)
			int taps[4];	// tap weights (renamed from 'gains', which shadowed the static table)

			yp -= 1; // use -2 cause a image down shift //DAN20100225

			// Gather the four tap weights and source lines for this output line
			t = 0;
			for(i=0; i<4; i++)
			{
				if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
				{
					t += taps[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)zeroline;
				}
				else
				{
					t += taps[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
				}
				yp++;
				rmdr+=64;
			}

			if(t)
			{
				__m128i half;

				gA = _mm_set1_epi16(taps[0]);
				gB = _mm_set1_epi16(taps[1]);
				gC = _mm_set1_epi16(taps[2]);
				gD = _mm_set1_epi16(taps[3]);

				outline128 = (__m128i *)dst;
				lineA = (__m128i *)scanline[0];
				lineB = (__m128i *)scanline[1];
				lineC = (__m128i *)scanline[2];
				lineD = (__m128i *)scanline[3];

				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					// Signed data: weight, saturate to 0..16383, restore range
					for(x=0;x<widthbytes; x+=step)
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);

						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD);
						o128 = _mm_adds_epi16(o128,t1);

						// upper limit to 32767
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_slli_epi16(o128,1);

						_mm_storeu_si128(outline128++, o128);

						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);

						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD);
						o128 = _mm_adds_epi16(o128,t1);

						// upper limit to 32767
						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
						o128 = _mm_slli_epi16(o128,1);

						_mm_storeu_si128(outline128++, o128);
					}
					break;

				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					// Unsigned 16-bit data: drop to 13 bits, weight, clamp, restore
					for(x=0;x<widthbytes; x+=step)
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned

						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD);
						o128 = _mm_adds_epi16(o128,t1);

						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);

						_mm_storeu_si128(outline128++, o128);

						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned

						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD);
						o128 = _mm_adds_epi16(o128,t1);

						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);

						_mm_storeu_si128(outline128++, o128);
					}
					break;

				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					// 8-bit data: widen each half to 16 bits, filter, repack
					for(x=0;x<widthbytes; x+=step)
					{
						lA = _mm_loadu_si128(lineA);
						lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
						lB = _mm_loadu_si128(lineB);
						lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
						lC = _mm_loadu_si128(lineC);
						lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
						lD = _mm_loadu_si128(lineD);
						lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);

						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned

						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD);
						o128 = _mm_adds_epi16(o128,t1);

						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);

						half = o128;

						lA = _mm_loadu_si128(lineA++);
						lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);

						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned

						o128 = _mm_mulhi_epi16(lA, gA);
						t1 = _mm_mulhi_epi16(lB, gB);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lC, gC);
						o128 = _mm_adds_epi16(o128,t1);
						t1 = _mm_mulhi_epi16(lD, gD);
						o128 = _mm_adds_epi16(o128,t1);

						o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
						o128 = _mm_slli_epi16(o128,4);

						half = _mm_srli_epi16(half,8);
						o128 = _mm_srli_epi16(o128,8);
						o128 = _mm_packus_epi16(o128, half);

						_mm_storeu_si128(outline128++, o128);
					}
					break;
				}
			}
			else
			{
				// All tap weights are zero: emit a blank line
				if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
				{
					// Fill with the repeating 32-bit pattern 0x10801080 (YUV "black",
					// assuming a little-endian byte order -- TODO confirm the YUYV
					// byte order of the buffer).  The previous code passed the
					// pattern to memset(), which truncates the fill value to a
					// single byte and filled 0x80 everywhere instead.
					for(i=0; i<widthbytes; i++)
						dst[i] = (i & 1) ? 0x10 : 0x80;
				}
				else
				{
					memset(dst, 0, widthbytes);
				}
			}

			yposf += ystepf;
			dst += pitch;
		}
	}
}
// Single-pixel variant of RGB48VerticalShiftZoom: vertically shift and zoom
// using the 4-tap lanczos[] kernel with unaligned scalar/partial-SSE output.
// NOTE(review): only the first pixel of each output line is written (there is
// no loop over x) and the 'xx' parameter is unused -- presumably callers use
// this routine for a single column; confirm before widening its use.
//   decoder     - supplies StereoBufferFormat, which selects the pixel layout
//   RGB48       - image buffer, filtered in place
//   buffer      - scratch buffer that receives a copy of the source region
//   widthbytes  - bytes per line copied into the scratch buffer
//   height      - number of lines
//   pitch       - bytes between lines in RGB48 (may be negative)
//   offset      - vertical shift as a fraction of the height
//   zoom        - vertical magnification factor
//   xx          - currently unused
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
								int widthbytes, int height, int pitch, float offset,
								float zoom, int xx)
{
	float yposf,ystepf;
	unsigned short *scanline[4];
	int step;
	__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
	uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
	uint8_t *outlinePos8;
	uint16_t *outlinePos16;

	(void)xx;	// see NOTE(review) above

	offset = -offset;

	// First source line position, centering the zoom on the middle of the frame
	yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
	ystepf = 1.0f/zoom;		// source lines consumed per output line

	if(pitch < 0)
		yposf -= ystepf;

	// Bytes per pixel for each supported format; this also validates the
	// format (the computed value is not otherwise used below)
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
		step = 4;
		break;
	case DECODED_FORMAT_RGB24:
		step = 3;
		break;
	case DECODED_FORMAT_YUYV:
		step = 4;
		break;
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
		step = 8;
		break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
		step = 6;
		break;
	default:
		assert(0);
		break;
	}
	(void)step;

	{
		// Substitute line of zeros for taps that fall outside the image
		static char zeroline[1024] = {0};
		int y;
		int yoffset = (int)(yposf - 2.0);
		int yend = (int)(yposf + 2.0 + ystepf*height);
		unsigned char *src = (unsigned char *)RGB48;
		unsigned char *dst = (unsigned char *)RGB48;
		unsigned char *ptr = (unsigned char *)buffer;

		if(yoffset < 0) yoffset = 0;
		if(yend > height) yend = height;

		// Copy the needed source region into 'buffer' since filtering is in place
		src += pitch * yoffset;
		for(y=yoffset; y<yend; y++)
		{
			memcpy(ptr, src, widthbytes);
			ptr += widthbytes;
			src += pitch;
		}

		ptr = (unsigned char *)buffer;
		for(y=0;y<height; y++)
		{
			int i,t,yp = ((int)yposf);
			int rmdr = 63-((int)(yposf*64.0) & 63);	// subpixel phase (64 steps per line)
			int taps[4];	// tap weights (renamed from 'gains', which shadowed the static table)

			yp -= 1; // use -2 cause a image down shift //DAN20100225

			// Gather the four tap weights and source lines for this output line
			t = 0;
			for(i=0; i<4; i++)
			{
				if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
				{
					t += taps[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)zeroline;
				}
				else
				{
					t += taps[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
				}
				yp++;
				rmdr+=64;
			}

			if(t)
			{
				gA = _mm_set1_epi16(taps[0]);
				gB = _mm_set1_epi16(taps[1]);
				gC = _mm_set1_epi16(taps[2]);
				gD = _mm_set1_epi16(taps[3]);

				outlinePos8 = (uint8_t *)dst;
				outlinePos16 = (uint16_t *)dst;
				lineAPos = (uint8_t *)scanline[0];
				lineBPos = (uint8_t *)scanline[1];
				lineCPos = (uint8_t *)scanline[2];
				lineDPos = (uint8_t *)scanline[3];

				// NOTE(review): the SSE paths below load 16 bytes but consume
				// fewer -- they over-read past the pixel being processed
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;

					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);

					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);

					// Store only one pixel (four 16-bit components)
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16[3] = _mm_extract_epi16(o128, 3);
					outlinePos16+=4;
					break;

				case DECODED_FORMAT_WP13:
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;

					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);

					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);

					// Store only one pixel (three 16-bit components)
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16+=3;
					break;

				case DECODED_FORMAT_RG64:
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;

					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned

					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);

					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);

					// Store only one pixel (four 16-bit components)
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16[3] = _mm_extract_epi16(o128, 3);
					outlinePos16+=4;
					break;

				case DECODED_FORMAT_RG48:
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;

					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned

					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);

					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);

					// Store only one pixel (three 16-bit components)
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16+=3;
					break;

				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4;
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4;
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4;
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4;
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);

					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned

					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);

					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_srli_epi16(o128,4);

					// Store only one pixel (four bytes)
					outlinePos8[0] = _mm_extract_epi16(o128, 0);
					outlinePos8[1] = _mm_extract_epi16(o128, 1);
					outlinePos8[2] = _mm_extract_epi16(o128, 2);
					outlinePos8[3] = _mm_extract_epi16(o128, 3);
					outlinePos8+=4;
					break;

				case DECODED_FORMAT_RGB24:
					// SSE2 cannot load 3-byte pixels without over-reading, so
					// the 24-bit path is computed in scalar code
					{
						int r,g,b;
						b = ((lineAPos[0] * taps[0])>>7) +
							((lineBPos[0] * taps[1])>>7) +
							((lineCPos[0] * taps[2])>>7) +
							((lineDPos[0] * taps[3])>>7); //16-bit
						g = ((lineAPos[1] * taps[0])>>7) +
							((lineBPos[1] * taps[1])>>7) +
							((lineCPos[1] * taps[2])>>7) +
							((lineDPos[1] * taps[3])>>7); //16-bit
						r = ((lineAPos[2] * taps[0])>>7) +
							((lineBPos[2] * taps[1])>>7) +
							((lineCPos[2] * taps[2])>>7) +
							((lineDPos[2] * taps[3])>>7); //16-bit

						if(r<0) r = 0; if(r>65535) r = 65535;
						if(g<0) g = 0; if(g>65535) g = 65535;
						if(b<0) b = 0; if(b>65535) b = 65535;

						lineAPos+=3;
						lineBPos+=3;
						lineCPos+=3;
						lineDPos+=3;

						outlinePos8[0] = b >> 8; //b
						outlinePos8[1] = g >> 8; //g
						outlinePos8[2] = r >> 8; //r
						outlinePos8+=3;
					}
					break;
				}
			}
			else
			{
				// All tap weights are zero: emit a blank line
				if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
				{
					// Fill with the repeating 32-bit pattern 0x10801080 (YUV "black",
					// assuming a little-endian byte order -- TODO confirm the YUYV
					// byte order of the buffer).  The previous code passed the
					// pattern to memset(), which truncates the fill value to a
					// single byte and filled 0x80 everywhere instead.
					for(i=0; i<widthbytes; i++)
						dst[i] = (i & 1) ? 0x10 : 0x80;
				}
				else
				{
					memset(dst, 0, widthbytes);
				}
			}

			yposf += ystepf;
			dst += pitch;
		}
	}
}
/*
 * RGB48VerticalShift -- shift the image vertically by a (possibly sub-pixel)
 * amount using a 4-tap filter whose weights come from the file-level
 * gains[SUBPIXEL][4] table.  The result is written back over the source
 * image in place; 'buffer' supplies scratch space for a 4-line ring buffer.
 *
 * decoder     - supplies StereoBufferFormat, which selects the pixel layout
 * RGB48       - source/destination image (layout per StereoBufferFormat)
 * buffer      - caller scratch, at least 4 rounded-up lines
 * widthbytes  - valid bytes per line (rounded up internally to 'step')
 * height      - lines in the image
 * pitch       - bytes between line starts in RGB48
 * offset      - vertical shift as a fraction of image height; sign selects
 *               direction (negative processes lines bottom-up)
 */
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
int widthbytes, int height, int pitch, float offset)
{
float yposf,remainf;
int yposi,tablepos,x,y;
int gainA,gainB,gainC,gainD;
//int endofSSEline = 0;
// scanline[0..3] is a ring buffer holding the four source lines feeding the
// 4-tap filter for the current output line.
unsigned short *scanline[4], *tline;
int spitch = pitch/2; // pitch in 16-bit units
int neg = 0,shift = 0,skip,step;
int origwidthbytes = widthbytes;
int origwidthextra;
__m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
__m128i *lineA, *lineB, *lineC, *lineD, *outline128;
// offset = -offset;
if(offset < 0.0)
neg = 1;
// Split the shift into a whole-line part (yposi) and a sub-line part
// (tablepos indexes the gains[] filter table).
yposf = height * offset;
yposi = (int)floor(yposf);
remainf = yposf - (float)yposi;
tablepos = (int)(remainf*(float)SUBPIXEL);
yposi = abs(yposi);
if(yposi==0 && tablepos == 0)
return; // no move required
// -3 , 0 best small notch at zero?
//
// Tap order is reversed between the two directions; the table has a
// built-in one/two line offset depending on direction.
if(neg)
{
yposi -= 2;
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
}
else
{
yposi -= 1; //offset inherent in the table
gainD = gains[tablepos][0];
gainC = gains[tablepos][1];
gainB = gains[tablepos][2];
gainA = gains[tablepos][3];
}
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
// skip = 16-bit units per pixel in the scalar sense; step = bytes consumed
// per inner-loop iteration (one or two SSE registers).
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
skip = 4;
step = 16;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
step = 16;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
step = 16;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
default:
skip = 6;
step = 32;
break;
}
// scanline[0] = buffer;
// scanline[1] = buffer + width*skip/2;
// scanline[2] = buffer + width*skip/2*2;
// scanline[3] = buffer + width*skip/2*3;
// Round the working width up to a whole number of SSE steps; the tail of
// the real line (origwidthextra bytes) is stored via memcpy below.
widthbytes += (step - 1);
widthbytes -= (widthbytes % step);
origwidthextra = (origwidthbytes % step);
scanline[0] = buffer;
scanline[1] = buffer + widthbytes/2;
scanline[2] = buffer + widthbytes/2*2;
scanline[3] = buffer + widthbytes/2*3;
// Prime the ring buffer with the first four source lines (zeros where the
// shifted position falls outside the image).
for(y=0; y<4; y++)
{
if(yposi+y >=0 && yposi+y<height)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-1-yposi-y)*spitch;
else
ptr += (yposi+y)*spitch;
memcpy(scanline[y], ptr, origwidthbytes);
}
else
{
memset(scanline[y], 0, origwidthbytes);
}
}
{
for(y=0;y<height; y++)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-y-1)*spitch;
else
ptr += y*spitch;
outline128 = (__m128i *)ptr;
lineA = (__m128i *)scanline[0];
lineB = (__m128i *)scanline[1];
lineC = (__m128i *)scanline[2];
lineD = (__m128i *)scanline[3];
//for(x=0;x<width*skip/2; x+=step)
for(x=0;x<widthbytes; x+=step)
{
__m128i half;
// First pass: load 8 samples from each of the 4 taps, normalized to
// 13-bit unsigned so the signed mulhi keeps precision.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
shift = 0;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
// 8-bit formats: pointers are NOT advanced here -- the same 16 bytes
// are re-loaded in the second pass (unpacklo) below.  unpackhi with a
// zero low half places each byte in the high byte of a 16-bit lane.
lA = _mm_loadu_si128(lineA);
lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB);
lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC);
lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD);
lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
break;
}
// Weighted sum of the four taps (saturating adds of mulhi products).
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
// Saturating add-then-subtract clamps each lane to an upper bound
// (0x0fff or 0x3fff) before rescaling back to the output range.
if(shift)
{
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
if(skip == 6) //RGB48 || WP13
{
// 16-bit formats store the first register now; partial tails go
// through scratch + memcpy so we never write past the real line end.
if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
_mm_storeu_si128(outline128++, o128);
else
{
//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
_mm_storeu_si128((__m128i *)scanline[0], o128);
memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
outline128++;
}
}
else
{
// 8-bit formats: hold the high-half result until the low half is
// computed, then pack both into one register below.
half = o128;
}
// Second pass: same filtering for the other 8 samples of the step.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_WP13:
{
lA = _mm_loadu_si128(lineA++);
lB = _mm_loadu_si128(lineB++);
lC = _mm_loadu_si128(lineC++);
lD = _mm_loadu_si128(lineD++);
shift = 0;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_RG48:
{
lA = _mm_loadu_si128(lineA++);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_loadu_si128(lineB++);
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_loadu_si128(lineC++);
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_loadu_si128(lineD++);
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
// Same 16 bytes as the first pass, low half this time; pointers advance.
lA = _mm_loadu_si128(lineA++);
lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
lB = _mm_loadu_si128(lineB++);
lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
lC = _mm_loadu_si128(lineC++);
lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
lD = _mm_loadu_si128(lineD++);
lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
lA = _mm_srli_epi16(lA,3); //13-bit unsigned
lB = _mm_srli_epi16(lB,3); //13-bit unsigned
lC = _mm_srli_epi16(lC,3); //13-bit unsigned
lD = _mm_srli_epi16(lD,3); //13-bit unsigned
shift = 3;
break;
}
o128 = _mm_mulhi_epi16(lA, gA);
t1 = _mm_mulhi_epi16(lB, gB);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lC, gC);
o128 = _mm_adds_epi16(o128,t1);
t1 = _mm_mulhi_epi16(lD, gD);
o128 = _mm_adds_epi16(o128,t1);
if(shift)
{
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
if(skip != 6) //!RGB48 || !WP13
{
// Pack both halves back down to bytes for the 8-bit formats.
half = _mm_srli_epi16(half,8);
o128 = _mm_srli_epi16(o128,8);
o128 = _mm_packus_epi16(o128, half);
}
if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
{
_mm_storeu_si128(outline128++, o128);
}
else
{
//if(x+16 < origwidthbytes+16)
// Tail of the second register: only the bytes beyond the first 16.
if(origwidthextra > 16)
{
_mm_storeu_si128((__m128i *)scanline[0], o128);
memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
}
outline128++;
}
}
// Rotate the ring buffer and fetch the next source line into slot 3.
tline = scanline[0];
scanline[0] = scanline[1];
scanline[1] = scanline[2];
scanline[2] = scanline[3];
scanline[3] = tline;
if(yposi+y+4 >=0 && yposi+y+4<height)
{
unsigned short *ptr = RGB48;
if(neg)
ptr += (height-1-(yposi+y+4))*spitch;
else
ptr += (yposi+y+4)*spitch;
memcpy(scanline[3], ptr, origwidthbytes);
}
else
{
memset(scanline[3], 0, origwidthbytes);
}
}
}
}
/*
 * RGB48HoriShiftZoom -- horizontally shift and zoom one scanline of
 * 3-component 16-bit pixels (RG48/WP13) in place, using a 4-tap Lanczos
 * resampler (file-level lanczos[] table, 64 sub-pixel phases per tap).
 *
 * decoder   - supplies StereoBufferFormat and FrameHDynamic* tuning values
 * RGB48     - source/destination line, width pixels of 3 x 16-bit samples
 * buffer    - scratch for a working copy of the line
 * line      - current line index (used for the rotational offset ramp)
 * hoffset   - horizontal shift as a fraction of width
 * roffset   - rotational offset (shift varies linearly with 'line')
 * zoom      - horizontal magnification
 * flip      - nonzero: mirror the line left<->right first
 * frameTilt - per-eye zoom trim (eye>0 widens, otherwise narrows)
 */
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
float xposf,xstepf;
int x;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer; // unsigned view (RG48)
short *sscanline = (short *)buffer; // signed view (WP13 13-bit signed)
int neg = 0;
float offset = hoffset;
if(flip)
{
// Mirror the line in place: swap pixel x with pixel width-1-x (3 samples
// each).  ptrR advances 3 during the swaps, so -6 nets one pixel left.
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
ptrR -= 6;
}
}
// Frame tilt trims zoom differently per eye.
if(eye > 0)
{
zoom *= 1.0f + frameTilt;
}
else
{
zoom /= 1.0f + frameTilt;
}
// Source position (in pixels) corresponding to destination x == 0.
xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
xposf -= width * roffset * 0.5f / zoom;
xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
if(xposf < 0.0)
neg = 1;
xstepf = 1.0f/zoom;
// Resample from a copy so in-place writes don't corrupt unread samples.
memcpy(scanline, RGB48, width*3*2);
{
//unsigned short zeroline[3] = {0};
int xx = 0;
int ixpos = (int)(xposf * 65536.0f); // 16.16 fixed-point source position
int ixstep = (int)(xstepf * 65536.0f); // 16.16 fixed-point step
float xbase = xposf / (float)width;
float xstep = xstepf / (float)width;
float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f; // nonlinear stretch amount
// int holdstart = width*5/10; // Use to specify a area of uniform stretch
// int holdend = width*5/10;
// [holdstart, holdend] is the region of uniform ("flat") stretch; the
// step ramps toward the edges so total width is preserved.
int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
float flatxstep;
float modified_xstep_avg;
float bottomxstep;
float basexstepstart;
float basexstepend;
float range;
#if MMXSUPPORTED //TODO DANREMOVE
__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); // saturating clamp constant
#endif
if(holdstart < 0) holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
if(holdend > width) holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
range = (float)(holdend - holdstart);
flatxstep = xstep-z*0.5f*xstep;
// Average step needed outside the hold region so the whole line still
// spans exactly 'width' source pixels.
modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
// Edge ramp start/end values, balanced for an off-center hold region.
if(holdstart == (width-holdend))
{
basexstepstart = bottomxstep;
basexstepend = bottomxstep;
}
else if(holdstart < (width-holdend))
{
float a = (float)holdstart / (float)(width-holdend);
float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
basexstepstart = startavg - (flatxstep - startavg);
basexstepend = endavg - (flatxstep - endavg);
}
else
{
float a = (float)(width-holdend) / (float)holdstart;
float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
basexstepstart = startavg - (flatxstep - startavg);
basexstepend = endavg - (flatxstep - endavg);
}
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
// Signed 13-bit (WP13) path.
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
// Variable step: ramp at the edges, flat inside the hold region.
if(x<holdstart)
{
fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
xp = (int)(fxpos * 65536.0f*(float)width);
rmdr = 63-((xp>>10) & 63); // 6-bit sub-pixel phase into lanczos[]
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-1)*3;
src64 = (__m64 *)&sscanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
src64 = (__m64 *)&sscanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
// Saturating add/sub clamps to 0..0x3fff before the final rescale.
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 1);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
#endif
{
// Scalar fallback near the line edges; out-of-range taps carry their
// weight forward to the next in-range tap so energy is conserved.
int i,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
gains += lanczos[rmdr]>>1;
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * sscanline[xp*3]);
g += (gains * sscanline[xp*3+1]);
b += (gains * sscanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
else
{
// Unsigned 16-bit (RG48) path: samples pre-shifted right 1 so the signed
// mulhi works, compensated by the final shift-left of 2.
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
xp = (int)(fxpos * 65536.0f*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
src64 = (__m64 *)&scanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
src64 = (__m64 *)&scanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 2);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
#endif
{
int i,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
gains += lanczos[rmdr]>>1;
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * scanline[xp*3]);
g += (gains * scanline[xp*3+1]);
b += (gains * scanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
}
#if MMXSUPPORTED //TODO DANREMOVE
//_mm_empty();
#endif
}
// NOTE(review): the entire function below is compiled out by '#if 0'.  It is
// an older variant of RGB48HoriShiftZoom (fixed 50/50 hold region instead of
// the FrameHDynCenter/FrameHDynWidth-driven one, and MMX paths that are not
// gated by MMXSUPPORTED).  Kept verbatim as dead code; consider deleting it
// once its history is no longer needed.
#if 0 //Why is this not used?
void RGB48HoriShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
float xposf,remainf,xstepf;
int xposi,tablepos,x;
int Ra,Rb,Rc,Rd;
int Ga,Gb,Gc,Gd;
int Ba,Bb,Bc,Bd;
int gainA,gainB,gainC,gainD;
int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
short *sscanline = (short *)buffer;
int neg = 0,shift = 0;
float offset = hoffset;
__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
__m128i *line128, *outline128;
if(flip)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
t = *ptrL;
*ptrL++ = *ptrR;
*ptrR++ = t;
ptrR -= 6;
}
}
if(eye > 0)
{
zoom *= 1.0 + frameTilt;
}
else
{
zoom /= 1.0 + frameTilt;
}
xposf = (float)width*(0.5 - 1.0/(2.0*zoom) - offset);
xposf -= width * roffset * 0.5 / zoom;
xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
if(xposf < 0.0)
neg = 1;
xstepf = 1.0/zoom;
memcpy(scanline, RGB48, width*3*2);
{
unsigned short zeroline[3] = {0};
int xx = 0;
int ixpos = xposf * 65536.0;
int ixstep = xstepf * 65536.0;
float xbase = xposf / (float)width;
float xstep = xstepf / (float)width;
float z = (decoder->cfhddata.FrameHDynamic - 1.0)*2.0;
int holdstart = width*5/10; // Use to specify a area of uniform stretch
int holdend = width*5/10;
float flatxstep = xstep-z*0.5*xstep;
float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
if(bottomxstep < 0.0)
{
bottomxstep = 0.0;
flatxstep = modified_xstep_avg + modified_xstep_avg;
}
if(flatxstep < 0.0)
{
flatxstep = 0.0;
bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
}
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
/* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width);
if(fxpos >= 0.0 && fxpos <= 1.0)
{
if(z > 0.0)
{
fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos);
fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z);
}
else
{
fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos;
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z);
}
}
*/
xp = (fxpos * 65536.0*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1;// was -2 causing a right shift //DAN20100225
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-1)*3;
src64 = (__m64 *)&sscanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
src64 = (__m64 *)&sscanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&sscanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 1);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
{
int i,t,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
/* if(i == 3) //DAN20101112 this code was crashing disparity zoom
{
gains = lanczos[rmdr]>>1;
r += (gains * sscanline[(xp-1)*3]);
g += (gains * sscanline[(xp-1)*3+1]);
b += (gains * sscanline[(xp-1)*3+2]);
}
else */
{
gains += lanczos[rmdr]>>1;
}
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * sscanline[xp*3]);
g += (gains * sscanline[xp*3+1]);
b += (gains * sscanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
else
{
float fxpos = xbase;
for(x=0;x<width; x++) //RGB
{
int gains = 0;
int xp, rmdr;
if(z != 0.0)
{
if(x<holdstart)
{
fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
}
else if(x>holdend)
{
int diff = width - x;
int range = width - holdend;
fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
}
else
{
fxpos += flatxstep;
}
/* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width);
if(fxpos >= 0.0 && fxpos <= 1.0)
{
if(z > 0.0)
{
fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos);
fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z);
}
else
{
fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos;
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z);
}
}
*/
xp = (fxpos * 65536.0*(float)width);
rmdr = 63-((xp>>10) & 63);
xp >>= 16;
}
else
{
xp = ixpos>>16;
rmdr = 63-((ixpos>>10) & 63);
ixpos += ixstep;
}
xp -= 1; // was -2 causing a right shift //DAN20100225
if(xp>4 && xp<width-4)
{
__m64 *src64;
__m64 *dst64;
__m64 sumx16;
__m64 rgbx16;
__m64 gain16;
int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
src64 = (__m64 *)&scanline[linepos];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
src64 = (__m64 *)&scanline[linepos+3];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+6];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
src64 = (__m64 *)&scanline[linepos+9];
rgbx16 = *src64;
gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
sumx16 = _mm_adds_pi16(sumx16, rgbx16);
sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
sumx16 = _mm_slli_pi16(sumx16, 2);
dst64 = (__m64 *)&RGB48[xx];
*dst64 = sumx16;
}
else
{
int i,t,r=0,g=0,b=0;
for(i=0; i<4; i++)
{
if(xp<=0 || xp>= width)
{
/* if(i == 3) //DAN20101112 this code was crashing disparity zoom
{
gains = lanczos[rmdr]>>1;
r += (gains * scanline[(xp-1)*3]);
g += (gains * scanline[(xp-1)*3+1]);
b += (gains * scanline[(xp-1)*3+2]);
}
else */
{
gains += lanczos[rmdr]>>1;
}
}
else
{
gains += lanczos[rmdr]>>1;
r += (gains * scanline[xp*3]);
g += (gains * scanline[xp*3+1]);
b += (gains * scanline[xp*3+2]);
gains = 0;
}
xp++;
rmdr+=64;
}
r >>= 14;
g >>= 14;
b >>= 14;
if(r<0) r=0; else if(r>65535) r=65535;
if(g<0) g=0; else if(g>65535) g=65535;
if(b<0) b=0; else if(b>65535) b=65535;
RGB48[xx] = r;
RGB48[xx+1] = g;
RGB48[xx+2] = b;
}
xx+=3;
}
}
}
/*
memcpy(scanline, RGB48, width*3*2);
{
for(x=0;x<width*3; x+=3) //RGB
{
int r,g,b,xp = ((int)xposf)*3;
xposf += xstepf;
if(xp<0 || xp>= width*3)
{
RGB48[x] = 0;
RGB48[x+1] = 0;
RGB48[x+2] = 0;
}
else
{
r = scanline[xp];
g = scanline[xp+1];
b = scanline[xp+2];
RGB48[x] = r;
RGB48[x+1] = g;
RGB48[x+2] = b;
}
}
}
*/
//_mm_empty();
}
#endif
/*
 * RGBA64HoriShiftZoom -- horizontally shift and zoom one scanline of
 * 4-component 16-bit pixels (RGBA64 / W13A) in place, using a 4-tap Lanczos
 * resampler (file-level lanczos[] table, 64 sub-pixel phases per tap).
 *
 * decoder   - supplies StereoBufferFormat and FrameHDynamic tuning value
 * RGB48     - source/destination line, width pixels of 4 x 16-bit samples
 * buffer    - scratch for a working copy of the line
 * line      - current line index (used for the rotational offset ramp)
 * hoffset   - horizontal shift as a fraction of width
 * roffset   - rotational offset (shift varies linearly with 'line')
 * zoom      - horizontal magnification
 * flip      - nonzero: mirror the line left<->right first
 * frameTilt - per-eye zoom trim (eye>0 widens, otherwise narrows)
 */
void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer; // unsigned view (RGBA64)
	short *sscanline = (short *)buffer;                  // signed view (W13A 13-bit signed)
	float offset = hoffset;
	if(flip)
	{
		// Mirror the line in place: swap pixel x with pixel width-1-x
		// (4 samples each).
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*4) - 4;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			t = *ptrL;
			*ptrL++ = *ptrR;
			*ptrR++ = t;
			// BUGFIX: was "ptrR -= 4", which only undid the four ptrR++
			// increments above and left ptrR parked on the same (last)
			// pixel for every iteration, so the line was never mirrored.
			// Stepping back 8 moves one whole 4-component pixel left,
			// matching the "ptrR -= 6" of 3-component RGB48HoriShiftZoom.
			ptrR -= 8;
		}
	}
	// Frame tilt trims zoom differently per eye.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}
	// Source position (in pixels) corresponding to destination x == 0.
	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	// NOTE(review): the RGB sibling divides this roffset term by zoom
	// ("* 0.5f / zoom"); confirm whether the omission here is intentional.
	xposf -= width * roffset * 0.5f;
	xposf += line * (width* roffset / ((float)height*zoom));
	xstepf = 1.0f/zoom;
	// Resample from a copy so in-place writes don't corrupt unread samples.
	memcpy(scanline, RGB48, width*4*2);
	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = (int)(xposf * 65536.0f);   // 16.16 fixed-point source position
		int ixstep = (int)(xstepf * 65536.0f); // 16.16 fixed-point step
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f; // nonlinear stretch amount
		int holdstart = width*5/10; // Use to specify a area of uniform stretch
		int holdend = width*5/10;
		float flatxstep = xstep-z*0.5f*xstep;
		float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
		float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); // saturating clamp constant
#endif
		if(bottomxstep < 0.0)
		{
			bottomxstep = 0.0;
			flatxstep = modified_xstep_avg + modified_xstep_avg;
		}
		if(flatxstep < 0.0)
		{
			flatxstep = 0.0;
			bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg);
		}
		if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
		{
			// Signed 13-bit (W13A) path.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGBA
			{
				int gains = 0;
				int xp, rmdr;
				if(z != 0.0)
				{
					// Variable step: ramp at the edges, flat inside the hold region.
					if(x<holdstart)
					{
						fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63); // 6-bit sub-pixel phase into lanczos[]
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*4;
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					src64 = (__m64 *)&sscanline[linepos+4];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+8];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&sscanline[linepos+12];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					// Saturating add/sub clamps to 0..0x3fff before the final rescale.
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Scalar fallback near the line edges; out-of-range taps carry
					// their weight forward to the next in-range tap.
					int i,r=0,g=0,b=0,a=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*4]);
							g += (gains * sscanline[xp*4+1]);
							b += (gains * sscanline[xp*4+2]);
							a += (gains * sscanline[xp*4+3]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					a >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					if(a<0) a=0; else if(a>65535) a=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
					RGB48[xx+3] = a;
				}
				xx+=4;
			}
		}
		else
		{
			// Unsigned 16-bit (RGBA64) path: samples pre-shifted right 1 so the
			// signed mulhi works, compensated by the final shift-left of 2.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGBA
			{
				int gains = 0;
				int xp, rmdr;
				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error.
					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					src64 = (__m64 *)&scanline[linepos+4];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+8];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					src64 = (__m64 *)&scanline[linepos+12];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);
					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0,a=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * scanline[xp*4]);
							g += (gains * scanline[xp*4+1]);
							b += (gains * scanline[xp*4+2]);
							a += (gains * scanline[xp*4+3]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					a >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					if(a<0) a=0; else if(a>65535) a=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
					RGB48[xx+3] = a;
				}
				xx+=4;
			}
		}
	}
#if MMXSUPPORTED //TODO DANREMOVE
	//_mm_empty();
#endif
}
/*
 * RGB48WindowMask -- black out a vertical strip ('windowMask' fraction of the
 * line width) at the left (channel 0) or right (channel 1) edge of one
 * scanline, fading the first partially-covered pixel by the fractional
 * remainder.  A negative windowMask masks the opposite channel's edge.
 * Pixel layout (3 or 4 components of 16 bits) follows StereoBufferFormat;
 * W13A/WP13 samples are signed, everything else unsigned.
 */
void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask)
{
	float maskcols = (float)width * fabsf(windowMask);
	int edge = (int)maskcols;          /* whole pixels fully masked */
	float fade = maskcols - (float)edge; /* coverage of the partial pixel */
	int comps = 3;                     /* samples per pixel: 3 = RGB, 4 = RGBA */
	int i;

	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
		comps = 4;
		break;
	}

	/* A negative mask swaps which edge is blacked out. */
	if(windowMask < 0)
		channel = (channel == 0) ? 1 : 0;

	if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed
	{
		short *p = (short *)RGB48;
		if(channel == 0)
		{
			/* Left edge: zero the first 'edge' pixels, fade the next one. */
			memset(p, 0, 2*comps*edge);
			p += edge*comps;
			for(i = 0; i < comps; i++)
				p[i] = (int)((float)p[i] * (1.0-fade));
		}
		else
		{
			/* Right edge: zero the last 'edge' pixels, fade the one before. */
			p += (width-edge)*comps;
			memset(p, 0, 2*comps*edge);
			for(i = 1; i <= comps; i++)
				p[-i] = (int)((float)p[-i] * (1.0-fade));
		}
	}
	else
	{
		unsigned short *p = RGB48;
		if(channel == 0)
		{
			memset(p, 0, 2*comps*edge);
			p += edge*comps;
			for(i = 0; i < comps; i++)
				p[i] = (int)((float)p[i] * (1.0-fade));
		}
		else
		{
			p += (width-edge)*comps;
			memset(p, 0, 2*comps*edge);
			for(i = 1; i <= comps; i++)
				p[-i] = (int)((float)p[-i] * (1.0-fade));
		}
	}
}
/*
 * RGB48HoriShift: shift one interleaved RGB 16-bit scanline horizontally by a
 * fractional number of pixels using a 4-tap polyphase filter, optionally
 * mirroring the line left<->right first.
 *
 *  decoder - supplies StereoBufferFormat; WP13 input stays in signed 13-bit
 *            math, other formats are pre-shifted down 3 bits and restored.
 *  RGB48   - in/out scanline, 3 unsigned shorts (R,G,B) per pixel.
 *  buffer  - caller-provided scratch line; must hold the padded copy built
 *            below (width plus ~18 pixels of guard on each side).
 *  width   - pixels per line.
 *  offset  - shift expressed as a fraction of the line width; negative
 *            shifts the image the other way.
 *  flip    - nonzero: mirror the scanline before shifting.
 *
 * NOTE(review): `gains[SUBPIXEL][4]` is a file-scope filter-tap table defined
 * elsewhere; taps are presumably normalized for _mm_mulhi_epi16 (result is
 * the high 16 bits of the 32-bit product) -- confirm against the table.
 */
void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
float xposf,remainf;
int xposi,tablepos,x;
int gainA,gainB,gainC,gainD;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
int neg = 0,shift = 0;
__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
__m128i *line128, *outline128;
/* Optional in-place horizontal mirror: swap whole RGB pixels from both ends. */
if(flip)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t1,t2,t3;
t1 = ptrL[0];
ptrL[0] = ptrR[0];
ptrR[0] = t1;
t2 = ptrL[1];
ptrL[1] = ptrR[1];
ptrR[1] = t2;
t3 = ptrL[2];
ptrL[2] = ptrR[2];
ptrR[2] = t3;
ptrL += 3;
ptrR -= 3;
}
}
if(offset < 0.0)
neg = 1;
/* Split the shift into a whole-pixel part (xposi) and a sub-pixel filter
   phase (tablepos) used to index the gains[] tap table. */
xposf = width * offset;
xposi = (int)floorf(xposf);
remainf = xposf - (float)xposi;
tablepos = (int)(remainf*(float)SUBPIXEL);
xposi = abs(xposi);
if(xposi==0 && tablepos == 0)
return; // no move required
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
/* Build a padded working copy in `scanline`: whole-pixel shift is applied by
   where the source pixels land; 16 zero pixels of tail guard let the SIMD
   loop read past the end safely. */
if(neg == 0)
{
unsigned short *ptr = scanline;
int nwidth = width-xposi+16;
if(nwidth > width)
nwidth = width;
for(x=0;x<xposi+2;x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
memcpy(ptr, RGB48, (nwidth)*3*2);
ptr += (nwidth)*3;
for(x=0;x<16;x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
else
{
unsigned short *ptr = scanline;
/* Two lead-in pixels so the 4-tap filter is centered; they come from the
   source when in range, otherwise zero. */
for(x=0;x<2;x++)
{
if(x+xposi-2>=0)
{
*ptr++ = RGB48[(x+xposi-2)*3];//r
*ptr++ = RGB48[(x+xposi-2)*3+1];//g
*ptr++ = RGB48[(x+xposi-2)*3+2];//b
}
else
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
ptr += (width-xposi)*3;
for(x=0;x<xposi+16;x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
line128 = (__m128i *)&scanline[0];
//outline128 = line128;
outline128 = (__m128i *)&RGB48[0];
/* Prime a 3-register pipeline: 24 shorts = 8 RGB pixels of look-ahead. */
//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
l1 = _mm_loadu_si128(line128++);
l2 = _mm_loadu_si128(line128++);
l3 = _mm_loadu_si128(line128++);
shift = 0;
}
else
{
l1 = _mm_loadu_si128(line128++);
l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
l2 = _mm_loadu_si128(line128++);
l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
l3 = _mm_loadu_si128(line128++);
l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
shift = 3;
}
/* 4-tap filter: output = A*p[n] + B*p[n+1] + C*p[n+2] + D*p[n+3], built by
   byte-shifting successive 1-pixel (3-short) windows out of l1/l2/l3. */
for(x=0;x<width*3; x+=8)
{
//o=l1* gainA
o128 = _mm_mulhi_epi16(l1, gA);
//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
//t1 *= gainB
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_slli_si128(l2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gB);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,3*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,6*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
t2 = _mm_slli_si128(l3,7*2);
t1 = _mm_adds_epi16(t1,t2);
t1 = _mm_mulhi_epi16(t1, gD);
o128 = _mm_adds_epi16(o128,t1);
/* Advance the pipeline one vector and refill the tail. */
l1 = l2;
l2 = l3;
l3 = _mm_loadu_si128(line128++);
if(shift)
{
l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
/* Saturating add/sub pair clamps to 0x0fff, then restore 16-bit range. */
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
_mm_storeu_si128(outline128++, o128);
}
}
/*
 * RGBA64HoriShift: shift one interleaved RGBA 16-bit scanline horizontally by
 * a fractional number of pixels using a 4-tap polyphase filter, optionally
 * mirroring the line left<->right first.
 *
 *  decoder - supplies StereoBufferFormat; WP13/W13A input stays in signed
 *            13-bit math, other formats are pre-shifted down 3 bits and
 *            restored on output.
 *  RGB48   - in/out scanline, 4 unsigned shorts (R,G,B,A) per pixel.
 *  buffer  - caller-provided scratch line; must hold the padded copy built
 *            below (width plus ~18 pixels of guard).
 *  width   - pixels per line.
 *  offset  - shift expressed as a fraction of the line width; negative
 *            shifts the other way.
 *  flip    - nonzero: mirror the scanline before shifting.
 *
 * Bug fix: the mirror loop saved ptrL[2] (blue) into t4 before swapping the
 * alpha channel, so ptrR[3] received the old blue value instead of the old
 * alpha; it now saves ptrL[3].
 *
 * NOTE(review): `gains[SUBPIXEL][4]` is a file-scope filter-tap table defined
 * elsewhere; taps are presumably normalized for _mm_mulhi_epi16 -- confirm.
 */
void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
	float xposf, remainf;
	int xposi, tablepos, x;
	int gainA, gainB, gainC, gainD;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	int neg = 0, shift = 0;
	__m128i l1, l2, l3, gA, gB, gC, gD, o128, t1, t2;
	__m128i *line128, *outline128;

	/* Optional in-place horizontal mirror: swap whole RGBA pixels from both ends. */
	if(flip)
	{
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*4) - 4;
		for(x=0; x<width/2; x++)
		{
			int t1, t2, t3, t4;
			t1 = ptrL[0];
			ptrL[0] = ptrR[0];
			ptrR[0] = t1;
			t2 = ptrL[1];
			ptrL[1] = ptrR[1];
			ptrR[1] = t2;
			t3 = ptrL[2];
			ptrL[2] = ptrR[2];
			ptrR[2] = t3;
			t4 = ptrL[3]; // was ptrL[2]: alpha swap used the blue sample
			ptrL[3] = ptrR[3];
			ptrR[3] = t4;
			ptrL += 4;
			ptrR -= 4;
		}
	}

	if(offset < 0.0)
		neg = 1;

	/* Split the shift into a whole-pixel part (xposi) and a sub-pixel filter
	   phase (tablepos) indexing the gains[] tap table. */
	xposf = width * offset;
	xposi = (int)floorf(xposf);
	remainf = xposf - (float)xposi;
	tablepos = (int)(remainf*(float)SUBPIXEL);
	xposi = abs(xposi);

	if(xposi==0 && tablepos == 0)
		return; // no move required

	gainA = gains[tablepos][0];
	gainB = gains[tablepos][1];
	gainC = gains[tablepos][2];
	gainD = gains[tablepos][3];

	/* Build a padded working copy in `scanline`; 16 zero pixels of tail guard
	   let the SIMD loop read past the end safely. */
	if(neg == 0)
	{
		unsigned short *ptr = scanline;
		int nwidth = width-xposi+16;
		if(nwidth > width)
			nwidth = width;
		for(x=0; x<xposi+2; x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
		memcpy(ptr, RGB48, (nwidth)*4*2);
		ptr += (nwidth)*4;
		for(x=0; x<16; x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
	}
	else
	{
		unsigned short *ptr = scanline;
		/* Two lead-in pixels so the 4-tap filter is centered; they come from
		   the source when in range, otherwise zero. */
		for(x=0; x<2; x++)
		{
			if(x+xposi-2 >= 0)
			{
				*ptr++ = RGB48[(x+xposi-2)*4];//r
				*ptr++ = RGB48[(x+xposi-2)*4+1];//g
				*ptr++ = RGB48[(x+xposi-2)*4+2];//b
				*ptr++ = RGB48[(x+xposi-2)*4+3];//a
			}
			else
			{
				*ptr++ = 0;//r
				*ptr++ = 0;//g
				*ptr++ = 0;//b
				*ptr++ = 0;//a
			}
		}
		memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2);
		ptr += (width-xposi)*4;
		for(x=0; x<xposi+16; x++)
		{
			*ptr++ = 0;//r
			*ptr++ = 0;//g
			*ptr++ = 0;//b
			*ptr++ = 0;//a
		}
	}

	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);
	line128 = (__m128i *)&scanline[0];
	//outline128 = line128;
	outline128 = (__m128i *)&RGB48[0];

	/* Prime a 3-register pipeline: 24 shorts = 6 RGBA pixels of look-ahead.
	   Lane layout (2 pixels per vector):
	     l1 = r1,g1,b1,a1,r2,g2,b2,a2
	     l2 = r3,g3,b3,a3,r4,g4,b4,a4
	     l3 = r5,g5,b5,a5,r6,g6,b6,a6 */
	if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
	{
		l1 = _mm_loadu_si128(line128++);
		l2 = _mm_loadu_si128(line128++);
		l3 = _mm_loadu_si128(line128++);
		shift = 0;
	}
	else
	{
		l1 = _mm_loadu_si128(line128++);
		l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
		l2 = _mm_loadu_si128(line128++);
		l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
		l3 = _mm_loadu_si128(line128++);
		l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
		shift = 3;
	}

	/* 4-tap filter: output = A*p[n] + B*p[n+1] + C*p[n+2] + D*p[n+3], built by
	   byte-shifting successive 1-pixel (4-short) windows out of l1/l2/l3. */
	for(x=0; x<width*4; x+=8)
	{
		//o = l1 * gainA
		o128 = _mm_mulhi_epi16(l1, gA);

		//t1 = l1>>4*16 //t1 = r2,g2,b2,a2, 0  0  0  0
		//t2 = l2<<4*16 //t2 =  0  0  0  0 r3,g3,b3,a3
		//t1 += t2;     //t1 = r2,g2,b2,a2,r3,g3,b3,a3
		//l1 = t1; t1 *= gainB; o += t1
		t1 = _mm_srli_si128(l1,4*2);
		t2 = _mm_slli_si128(l2,4*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gB);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1>>4*16        //t1 = r3,g3,b3,a3, 0  0  0  0
		//t2 = (l2>>4*16)<<4*16//t2 =  0  0  0  0 r4,g4,b4,a4
		//t1 += t2             //t1 = r3,g3,b3,a3,r4,g4,b4,a4
		//l1 = t1; t1 *= gainC; o += t1
		t1 = _mm_srli_si128(l1,4*2);
		t2 = _mm_srli_si128(l2,4*2);
		t2 = _mm_slli_si128(t2,4*2);
		t1 = _mm_adds_epi16(t1,t2);
		l1 = t1;
		t1 = _mm_mulhi_epi16(t1, gC);
		o128 = _mm_adds_epi16(o128,t1);

		//t1 = l1>>4*16 //t1 = r4,g4,b4,a4, 0  0  0  0
		//t2 = l3<<4*16 //t2 =  0  0  0  0 r5,g5,b5,a5
		//t1 += t2      //t1 = r4,g4,b4,a4,r5,g5,b5,a5
		//t1 *= gainD; o += t1
		t1 = _mm_srli_si128(l1,4*2);
		t2 = _mm_slli_si128(l3,4*2);
		t1 = _mm_adds_epi16(t1,t2);
		t1 = _mm_mulhi_epi16(t1, gD);
		o128 = _mm_adds_epi16(o128,t1);

		/* Advance the pipeline one vector and refill the tail. */
		l1 = l2;
		l2 = l3;
		l3 = _mm_loadu_si128(line128++);
		if(shift)
		{
			l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
			/* Saturating add/sub pair clamps to 0x0fff, then restore 16-bit range. */
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
			o128 = _mm_slli_epi16(o128,4);
		}
		else
		{
			// upper limit to 32767
			o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
			o128 = _mm_slli_epi16(o128,1);
		}
		_mm_storeu_si128(outline128++, o128);
	}
}
/*
 * RGB48HoriShiftAnaglyph: per-channel variant of RGB48HoriShift. Each of the
 * R, G and B planes of an interleaved RGB 16-bit scanline can be mirrored and
 * shifted horizontally by its own fractional offset (used to build anaglyph
 * stereo views). The three shifted channels are merged into the shared
 * `scanline` scratch buffer, then filtered with one SIMD pass whose gain
 * vectors carry per-channel taps.
 *
 *  decoder   - supplies StereoBufferFormat (WP13 keeps 13-bit math).
 *  RGB48     - in/out scanline, 3 unsigned shorts (R,G,B) per pixel.
 *  buffer    - caller scratch line (padded copy built below).
 *  width     - pixels per line.
 *  offsetR/G/B - per-channel shift as a fraction of line width.
 *  flipR/G/B   - per-channel mirror flags.
 *
 * NOTE(review): the early-return below tests only the R channel
 * (Rxposi/Rtablepos); a call with R unshifted but G or B shifted would
 * return without doing anything -- confirm whether callers guarantee R is
 * always shifted when G/B are.
 */
void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width,
float offsetR, float offsetG, float offsetB ,
int flipR, int flipG, int flipB)
{
float Rxposf,Rremainf;
int Rxposi,Rtablepos;
float Gxposf,Gremainf;
int Gxposi,Gtablepos;
float Bxposf,Bremainf;
int Bxposi,Btablepos;
int x;
int RgainA,RgainB,RgainC,RgainD;
int GgainA,GgainB,GgainC,GgainD;
int BgainA,BgainB,BgainC,BgainD;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
int negR = 0;
int negG = 0;
int negB = 0;
int shift = 0;
__m128i l1,l2,l3,o128,t1,t2;
__m128i *line128, *outline128;
__m128i gA1,gB1,gC1,gD1,gA2,gB2,gC2,gD2,gA3,gB3,gC3,gD3;
/* Per-channel mirror: swap only that channel's samples from both ends. */
if(flipR)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL = *ptrR;
*ptrR = t;
ptrL += 3;
ptrR -= 3;
}
}
if(flipG)
{
unsigned short *ptrL = &RGB48[1];
unsigned short *ptrR = &RGB48[1];
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL = *ptrR;
*ptrR = t;
ptrL += 3;
ptrR -= 3;
}
}
if(flipB)
{
unsigned short *ptrL = &RGB48[2];
unsigned short *ptrR = &RGB48[2];
ptrR += (width*3) - 3;
for(x=0;x<width/2;x++)
{
int t;
t = *ptrL;
*ptrL = *ptrR;
*ptrR = t;
ptrL += 3;
ptrR -= 3;
}
}
if(offsetR < 0.0)
negR = 1;
if(offsetG < 0.0)
negG = 1;
if(offsetB < 0.0)
negB = 1;
/* Split each channel's shift into whole pixels plus a sub-pixel phase. */
Rxposf = width * offsetR;
Rxposi = (int)floorf(Rxposf);
Rremainf = Rxposf - (float)Rxposi;
Rtablepos = (int)(Rremainf*(float)SUBPIXEL);
Gxposf = width * offsetG;
Gxposi = (int)floorf(Gxposf);
Gremainf = Gxposf - (float)Gxposi;
Gtablepos = (int)(Gremainf*(float)SUBPIXEL);
Bxposf = width * offsetB;
Bxposi = (int)floorf(Bxposf);
Bremainf = Bxposf - (float)Bxposi;
Btablepos = (int)(Bremainf*(float)SUBPIXEL);
Rxposi = abs(Rxposi);
Gxposi = abs(Gxposi);
Bxposi = abs(Bxposi);
if(Rxposi==0 && Rtablepos == 0)
return; // no move required
RgainA = gains[Rtablepos][0];
RgainB = gains[Rtablepos][1];
RgainC = gains[Rtablepos][2];
RgainD = gains[Rtablepos][3];
GgainA = gains[Gtablepos][0];
GgainB = gains[Gtablepos][1];
GgainC = gains[Gtablepos][2];
GgainD = gains[Gtablepos][3];
BgainA = gains[Btablepos][0];
BgainB = gains[Btablepos][1];
BgainC = gains[Btablepos][2];
BgainD = gains[Btablepos][3];
/* Merge the R channel into the padded scratch line (only every 3rd short is
   written; G and B slots are filled by the passes below). */
if(negR == 0)
{
unsigned short *ptr = scanline;
int nwidth = width-Rxposi+16;
if(nwidth > width)
nwidth = width;
for(x=0;x<Rxposi+2;x++)
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
for(x=0;x<nwidth;x++)
{
*ptr++ = RGB48[x*3];//r
ptr++;//g
ptr++;//b
}
for(x=0;x<16;x++)
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
}
else
{
unsigned short *ptr = scanline;
for(x=0;x<2;x++)
{
if(x+Rxposi-2>=0)
{
*ptr++ = RGB48[(x+Rxposi-2)*3];//r
ptr++;//g
ptr++;//b
}
else
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
}
//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
//ptr += (width-xposi)*3;
for(x=Rxposi;x<width;x++)
{
*ptr++ = RGB48[x*3];//r
ptr++;//g
ptr++;//b
}
for(x=0;x<Rxposi+16;x++)
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
}
/* Merge the G channel (middle slot of each scratch pixel). */
if(negG == 0)
{
unsigned short *ptr = scanline;
int nwidth = width-Gxposi+16;
if(nwidth > width)
nwidth = width;
for(x=0;x<Gxposi+2;x++)
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
for(x=0;x<nwidth;x++)
{
ptr++;//r
*ptr++ = RGB48[x*3+1];//g
ptr++;//b
}
for(x=0;x<16;x++)
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
}
else
{
unsigned short *ptr = scanline;
for(x=0;x<2;x++)
{
if(x+Gxposi-2>=0)
{
ptr++;//r
*ptr++ = RGB48[(x+Gxposi-2)*3+1];//g
ptr++;//b
}
else
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
}
//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
//ptr += (width-xposi)*3;
for(x=Gxposi;x<width;x++)
{
ptr++;//r
*ptr++ = RGB48[x*3+1];//g
ptr++;//b
}
for(x=0;x<Gxposi+16;x++)
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
}
/* Merge the B channel (last slot of each scratch pixel). */
if(negB == 0)
{
unsigned short *ptr = scanline;
int nwidth = width-Bxposi+16;
if(nwidth > width)
nwidth = width;
for(x=0;x<Bxposi+2;x++)
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
for(x=0;x<nwidth;x++)
{
ptr++;//r
ptr++;//g
*ptr++ = RGB48[x*3+2];//b
}
for(x=0;x<16;x++)
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
}
else
{
unsigned short *ptr = scanline;
for(x=0;x<2;x++)
{
if(x+Bxposi-2>=0)
{
ptr++;//r
ptr++;//g
*ptr++ = RGB48[(x+Bxposi-2)*3+2];//b
}
else
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
}
//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
//ptr += (width-xposi)*3;
for(x=Bxposi;x<width;x++)
{
ptr++;//r
ptr++;//g
*ptr++ = RGB48[x*3+2];//b
}
for(x=0;x<Bxposi+16;x++)
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
}
/* Three interleaved gain vectors per tap: because an RGB triple (3 shorts)
   does not divide the 8-lane vector evenly, the R/G/B channel pattern
   advances by one lane per output vector; gA1/gA2/gA3 (etc.) are the three
   rotations of that pattern and are cycled in the loop below. */
gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA);
gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA);
gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA);
gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB);
gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB);
gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB);
gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC);
gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC);
gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC);
gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD);
gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD);
gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD);
line128 = (__m128i *)&scanline[0];
//outline128 = line128;
outline128 = (__m128i *)&RGB48[0];
/* Prime a 3-register pipeline, same layout as RGB48HoriShift. */
//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
l1 = _mm_loadu_si128(line128++);
l2 = _mm_loadu_si128(line128++);
l3 = _mm_loadu_si128(line128++);
shift = 0;
}
else
{
l1 = _mm_loadu_si128(line128++);
l1 = _mm_srli_epi16(l1,3); //13-bit unsigned
l2 = _mm_loadu_si128(line128++);
l2 = _mm_srli_epi16(l2,3); //13-bit unsigned
l3 = _mm_loadu_si128(line128++);
l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
shift = 3;
}
/* 4-tap filter, identical lane gymnastics to RGB48HoriShift, but with the
   per-channel gain vectors rotated each iteration. */
for(x=0;x<width*3; x+=8)
{
//o=l1* gainA
o128 = _mm_mulhi_epi16(l1, gA1);
//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
//t1 *= gainB
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_slli_si128(l2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gB1);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,3*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC1);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,6*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
t2 = _mm_slli_si128(l3,7*2);
t1 = _mm_adds_epi16(t1,t2);
t1 = _mm_mulhi_epi16(t1, gD1);
o128 = _mm_adds_epi16(o128,t1);
/* Rotate the channel-aligned gain vectors for the next output vector
   (t1 is dead here and reused as the swap temporary). */
t1 = gA1;
gA1 = gA2;
gA2 = gA3;
gA3 = t1;
t1 = gB1;
gB1 = gB2;
gB2 = gB3;
gB3 = t1;
t1 = gC1;
gC1 = gC2;
gC2 = gC3;
gC3 = t1;
t1 = gD1;
gD1 = gD2;
gD2 = gD3;
gD3 = t1;
/* Advance the pipeline one vector and refill the tail. */
l1 = l2;
l2 = l3;
l3 = _mm_loadu_si128(line128++);
if(shift)
{
l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
/* Saturating add/sub pair clamps to 0x0fff, then restore 16-bit range. */
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128,4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128,1);
}
_mm_storeu_si128(outline128++, o128);
}
}
/*
 * HistogramLine: accumulate one scanline into the decoder's analysis tools --
 * per-channel 8-bit histograms (histR/G/B), per-column waveform monitors
 * (waveR/G/B), and a UV vectorscope (scopeUV). Pixels are subsampled by
 * `step` so at most ~360 columns are analyzed per line.
 *
 *  sbase      - scanline base pointer; reinterpreted per `format` (also read
 *               as signed shorts via ssbase and as 32-bit words via lbase).
 *  width      - pixels per line.
 *  format     - decoded pixel format selecting the unpacking case below.
 *  whitepoint - 13 means the RGB data is in 13-bit space even if `format`
 *               says a 16-bit RGB layout; remapped to WP13/W13A here.
 *
 * All R/G/B values are reduced to 0..255 before indexing the tables; U/V are
 * derived with fixed-point (>>13) RGB->CbCr coefficients. `scaledvectorscope`
 * is currently hard-coded off, selecting the unscaled coefficient set.
 */
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
int x,val,ypos=0,upos=1,vpos=3;
int step = 1,pos=0;
short *ssbase = (short *)sbase;
uint32_t *lbase = (uint32_t *)sbase;
ToolsHandle *tools = decoder->tools;
int scaledvectorscope = 0;
if(tools == NULL)
return;
/* 13-bit whitepoint data uses the signed 13-bit unpack paths. */
if(whitepoint == 13)
{
if(format == DECODED_FORMAT_RG64)
format = DECODED_FORMAT_W13A;
else
format = DECODED_FORMAT_WP13;
}
/* Subsample so no more than 360 columns feed the waveform display. */
while(width/step > 360)
{
step*=2;
}
tools->waveformWidth = width/step;
decoder->tools->blurUVdone = 0;
switch(format & 0xffffff)
{
case DECODED_FORMAT_WP13: // signed 13-bit RGB, 3 shorts per pixel
decoder->tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = ssbase[0]>>5; // 13-bit -> 8-bit
G = ssbase[1]>>5;
B = ssbase[2]>>5;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
ssbase += step*3; // advance `step` pixels
}
break;
case DECODED_FORMAT_W13A: // signed 13-bit RGBA, 4 shorts per pixel
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = ssbase[0]>>5;
G = ssbase[1]>>5;
B = ssbase[2]>>5;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
ssbase += step*4; // 4 components per pixel
}
break;
case DECODED_FORMAT_RG48: // unsigned 16-bit RGB, 3 shorts per pixel
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = sbase[0]>>8; // 16-bit -> 8-bit; already 0..255, no clamp needed
G = sbase[1]>>8;
B = sbase[2]>>8;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
sbase += step*3;
}
break;
case DECODED_FORMAT_AB10: // 10-bit RGB packed into 32 bits
case DECODED_FORMAT_RG30:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = lbase[x];
R = (val>>22)&0xff; // top 8 of each 10-bit channel
G = (val>>12)&0xff;
B = (val>>02)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_AR10: // as AB10 but with R and B swapped
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = lbase[x];
B = (val>>22)&0xff;
G = (val>>12)&0xff;
R = (val>>02)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_R210: // big-endian 10-bit RGB; byte-swap first
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = SwapInt32BtoN(lbase[x]);
R = (val>>22)&0xff;
G = (val>>12)&0xff;
B = (val>>02)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_DPX0: // big-endian DPX 10-bit, channels at bits 24/14/4
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
val = SwapInt32BtoN(lbase[x]);
R = (val>>24)&0xff;
G = (val>>14)&0xff;
B = (val>>04)&0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_RG64: // 16-bit 4-component; RGB read from slots 1..3
case DECODED_FORMAT_B64A: // (b64a layout is A,R,G,B)
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int32_t R,G,B,U,V;
R = sbase[1]>>8;
G = sbase[2]>>8;
B = sbase[3]>>8;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
sbase += step*4;
}
break;
case COLOR_FORMAT_UYVY:
ypos=1,upos=0,vpos=2; // UYVY: luma/chroma positions differ from YUYV
/* fallthrough */
case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
case COLOR_FORMAT_YUYV:
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int Y,U,V,R,G,B;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 2;
Y = bptr[ypos]-16;
U = bptr[upos]-128;
Y+= bptr[ypos+2]-16; Y>>=1; // average the pair of lumas sharing this chroma
V = bptr[vpos]-128;
R = (9535*Y + 14688*V)>>13; //13-bit white
G = (9535*Y - 4375*V - 1745*U)>>13;
B = (9535*Y + 17326*U)>>13;
//TODO much -20 to 120 RGB range.
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if(scaledvectorscope)
{
U *= 255; U /= 314;
V *= 255; V /= 244;
}
//* 255.0/314.0
//* 255.0/244.0
U += 128;
V += 128;
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_YU64: // 16-bit YUV; sample only the high bytes
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int Y,U,V,R,G,B;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 4;
bptr++; //read only the high byte out of the 16-bit
Y = bptr[0]-16;
V = bptr[2]-128;
Y+= bptr[4]-16; Y>>=1;
U = bptr[6]-128;
R = (9535*Y + 14688*V)>>13; //13-bit white
G = (9535*Y - 4375*V - 1745*U)>>13;
B = (9535*Y + 17326*U)>>13;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if(scaledvectorscope)
{
U *= 255; U /= 314;
V *= 255; V /= 244;
}
U += 128;
V += 128;
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_V210: // 10-bit 4:2:2, 6 pixels per 4 32-bit words;
// unpack depends on the pixel's phase within the 6-pixel group
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int Y,U,V,R,G,B;
uint32_t *lptr = (uint32_t *)sbase;
lptr += (x/6)*4;
switch(x % 6)
{
case 0:
V = ((*lptr>>02) & 0xff) - 128;
Y = ((*lptr>>12) & 0xff) - 16;
U = ((*lptr>>22) & 0xff) - 128;
lptr++;
Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
break;
case 1:
lptr++;
Y = ((*lptr>>02) & 0xff) - 16;
V = ((*lptr>>12) & 0xff) - 128;
Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
lptr--;
U = ((*lptr>>22) & 0xff) - 128;
break;
case 2:
lptr++;
Y = ((*lptr>>22) & 0xff) - 16;
lptr++;
U = ((*lptr>>02) & 0xff) - 128;
Y+= ((*lptr>>12) & 0xff) - 16; Y>>=1;
V = ((*lptr>>22) & 0xff) - 128;
break;
case 3:
lptr++;
V = ((*lptr>>12) & 0xff) - 128;
lptr++;
U = ((*lptr>>02) & 0xff) - 128;
Y = ((*lptr>>12) & 0xff) - 16;
lptr++;
Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
break;
case 4:
lptr+=2;
V = ((*lptr>>22) & 0xff) - 128;
lptr++;
Y = ((*lptr>>02) & 0xff) - 16;
U = ((*lptr>>12) & 0xff) - 128;
Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
break;
case 5:
lptr+=2;
V = ((*lptr>>22) & 0xff) - 128;
lptr++;
U = ((*lptr>>12) & 0xff) - 128;
Y = ((*lptr>>22) & 0xff) - 16;
lptr++;
Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
break;
}
R = (9535*Y + 14688*V)>>13; //13-bit white
G = (9535*Y - 4375*V - 1745*U)>>13;
B = (9535*Y + 17326*U)>>13;
if(R > 255) R = 255;
if(R < 0) R = 0;
if(G > 255) G = 255;
if(G < 0) G = 0;
if(B > 255) B = 255;
if(B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if(scaledvectorscope)
{
U *= 255; U /= 314;
V *= 255; V /= 244;
}
U += 128;
V += 128;
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_RGB24: // 8-bit BGR byte order
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int R,G,B,U,V;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 3;
R = bptr[2];
G = bptr[1];
B = bptr[0];
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_RGB32: // 8-bit BGRA byte order
tools->histogram = 1;
for(x=0,pos=0; x<width; x+=step,pos++)
{
int R,G,B,U,V;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 4;
R = bptr[2];
G = bptr[1];
B = bptr[0];
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if(scaledvectorscope)
{
U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
}
if(U<0) U=0; if(U>255) U=255;
if(V<0) V=0; if(V>255) V=255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_BYR2: // raw Bayer formats carry no display RGB
case COLOR_FORMAT_BYR4:
//do nothing
break;
default:
assert(0);
#if (0 && DEBUG)
fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n");
#endif
break;
}
}
/*
 * GhostBust: reduce stereo crosstalk ("ghosting") between a left and right
 * RGB48 scanline. Each eye's pixel is linearized by squaring, mixed with
 * white (ileak*max) minus the leaked opposite-eye value, then converted back
 * with a square root. The math runs at 10-bit per channel (20-bit squared).
 *
 *  sbaseL/sbaseR  - left/right scanlines, 3 unsigned shorts per pixel.
 *  width          - pixels per line.
 *  ileakL/ileakR  - leak strength per eye, 16-bit scale (reduced to 10-bit).
 *
 * decoder->sqrttable caches integer square roots of 20-bit values; entries
 * still holding the 65535 sentinel are computed lazily on first use.
 * NOTE(review): not thread-safe with a shared table -- concurrent lines may
 * race on the lazy fill (benign: both writers store the same value).
 */
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
int x,RL,GL,BL,RR,GR,BR;
int nRL,nGL,nBL;
int nRR,nGR,nBR;
int max = 1024*1024-1;
unsigned short *sqrttable = decoder->sqrttable;
ileakL>>=6; // 16-bit -> 10-bit leak factors
ileakR>>=6;
if(sqrttable == NULL)
return;
for(x=0;x<width;x++)
{
RL = sbaseL[0]>>6;
GL = sbaseL[1]>>6; //10-bit
BL = sbaseL[2]>>6;
RL*=RL;
GL*=GL; //20-bit
BL*=BL;
RR = sbaseR[0]>>6;
GR = sbaseR[1]>>6; //10-bit
BR = sbaseR[2]>>6;
RR*=RR;
GR*=GR; //20-bit
BR*=BR;
/* Left eye: keep (1-leak) of self, add leak*white, subtract leaked right. */
nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit
nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL;
nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL;
nRL >>= 10; //20-bit
nGL >>= 10;
nBL >>= 10;
if(nRL>max) nRL=max; if(nRL<0) nRL=0;
if(nGL>max) nGL=max; if(nGL<0) nGL=0;
if(nBL>max) nBL=max; if(nBL<0) nBL=0;
/* Lazily fill sqrt cache entries (65535 = not yet computed). */
if(sqrttable[nRL] == 65535)
sqrttable[nRL] = (int)sqrt(nRL);
if(sqrttable[nGL] == 65535)
sqrttable[nGL] = (int)sqrt(nGL);
if(sqrttable[nBL] == 65535)
sqrttable[nBL] = (int)sqrt(nBL);
sbaseL[0] = sqrttable[nRL]<<6; // back to 16-bit range
sbaseL[1] = sqrttable[nGL]<<6;
sbaseL[2] = sqrttable[nBL]<<6;
sbaseL += 3;
/* Right eye: same mix using the pre-correction left values. */
nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit
nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR;
nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR;
nRR >>= 10; //20-bit
nGR >>= 10;
nBR >>= 10;
if(nRR>max) nRR=max; if(nRR<0) nRR=0;
if(nGR>max) nGR=max; if(nGR<0) nGR=0;
if(nBR>max) nBR=max; if(nBR<0) nBR=0;
if(sqrttable[nRR] == 65535)
sqrttable[nRR] = (int)sqrt(nRR);
if(sqrttable[nGR] == 65535)
sqrttable[nGR] = (int)sqrt(nGR);
if(sqrttable[nBR] == 65535)
sqrttable[nBR] = (int)sqrt(nBR);
sbaseR[0] = sqrttable[nRR]<<6;
sbaseR[1] = sqrttable[nGR]<<6;
sbaseR[2] = sqrttable[nBR]<<6;
sbaseR += 3;
}
#else // works and fast but has not image linearization, not as good
/* NOTE(review): this disabled branch references `ileak`, which is not a
   parameter of this function -- it would not compile if enabled. */
__m128i *ptrL = (__m128i *)sbaseL;
__m128i *ptrR = (__m128i *)sbaseR;
__m128i t,L,R,nL,nR;
int x,width8 = (width*3) & ~7;
__m128i white_epi16 = _mm_set1_epi16(32767);
__m128i leak_epi16 = _mm_set1_epi16(ileak>>1);
__m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1));
for(x=0;x<width8;x+=8)
{
L = _mm_load_si128(ptrL);
R = _mm_load_si128(ptrR);
L = _mm_srli_epi16(L,1); //15-bit
R = _mm_srli_epi16(R,1); //15-bit
nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
t = _mm_mulhi_epi16(white_epi16, leak_epi16);
nL = _mm_adds_epi16(nL, t);
t = _mm_mulhi_epi16(R, leak_epi16);
nL = _mm_subs_epu16(nL, t);
nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
t = _mm_mulhi_epi16(white_epi16, leak_epi16);
nR = _mm_adds_epi16(nR, t);
t = _mm_mulhi_epi16(L, leak_epi16);
nR = _mm_subs_epu16(nR, t);
L = _mm_slli_epi16(nL,2);
R = _mm_slli_epi16(nR,2);
_mm_store_si128(ptrL++, L);
_mm_store_si128(ptrR++, R);
}
#endif
}
// Red/Cyan anaglyph ghost-busting: reduces crosstalk ("ghosting") between
// the red left-eye channel and the cyan (G+B) right-eye channel of one
// interleaved 16-bit RGB scanline, in place.
//
// decoder - supplies the lazily filled square-root lookup table
// sbase   - interleaved RGB pixels, 3 components per pixel, modified in place
// width   - pixel count
// ileakL  - 16-bit leak amount applied to the red (left-eye) channel
// ileakR  - 16-bit leak amount applied to the cyan (right-eye) channels
//
// Active path (#if 1): components are reduced to 10-bit and squared (a cheap
// gamma-to-linear approximation), the opposing eye's squared signal is
// subtracted, and the result is mapped back through decoder->sqrttable.
// The two #elif 0 branches below are disabled alternatives kept for
// reference: a float implementation and an SSE2 implementation.
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;                       // largest squared 10-bit value (20-bit)
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;                                  // bring 16-bit leaks down to 10-bit
	ileakR>>=6;
	if(sqrttable == NULL)
		return;                                  // no lookup table allocated -- leave line untouched
	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R;
		G*=G; //20-bit
		B*=B;
		// Blend each channel toward white by its leak and subtract the
		// opposing eye: red opposes the cyan average (G+B)/2, and vice versa.
		nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit
		nG = G*(1023-ileakR) + ileakR*max - R*ileakR;
		nB = B*(1023-ileakR) + ileakR*max - R*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		// Clamp back into the table's 20-bit index range.
		if(nR>max) nR=max; if(nR<0) nR=0;
		if(nG>max) nG=max; if(nG<0) nG=0;
		if(nB>max) nB=max; if(nB<0) nB=0;
		// Table entries start at 65535 meaning "not yet computed";
		// fill square roots on first use.
		if(sqrttable[nR] == 65535)
			sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535)
			sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535)
			sqrttable[nB] = (int)sqrt(nB);
		// Restore 16-bit output range.
		sbase[0] = sqrttable[nR]<<6;
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
#elif 0
	// Disabled: floating-point reference implementation of the same math,
	// working in normalized 0..1 values.
	int x;
	float R,G,B;
	float nR,nG,nB;
	float fleakL = (float)ileakL / 65535.0;
	float fleakR = (float)ileakR / 65535.0;
	for(x=0;x<width;x++)
	{
		R = sbase[0];
		G = sbase[1];
		B = sbase[2];
		R /= 65535.0;
		G /= 65535.0;
		B /= 65535.0;
		R *= R;
		G *= G;
		B *= B;
		nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL;
		nG = G*(1.0-fleakR) + fleakR - R*fleakR;
		nB = B*(1.0-fleakR) + fleakR - R*fleakR;
		if(nR<0) nR=0;
		if(nG<0) nG=0;
		if(nB<0) nB=0;
		nR = sqrt(nR);
		nG = sqrt(nG);
		nB = sqrt(nB);
		sbase[0] = nR * 65535.0;
		sbase[1] = nG * 65535.0;
		sbase[2] = nB * 65535.0;
		sbase += 3;
	}
#elif 0
	// Disabled: SSE2 implementation processing two RGB pixels per iteration.
	__m128i RGBRGB,rgb_epi32,RGB1,RGB2;
	__m128i zero_epi128 = _mm_setzero_si128();
	int x,width6 = (width*3) / 6 * 6;            // components rounded down to a multiple of 6
	__m128 white_ps = _mm_set1_ps(1.0);
	__m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0));
	__m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0);
	__m128 scale_ps = _mm_set1_ps(65535.0);
	__m128 scalehalf_ps = _mm_set1_ps(32767.0);
	__m128 zero_ps = _mm_set1_ps(0.0);
	__m128 rgb_ps, alt_rgb_ps;
	__m128i sub_epi32;
	__m128 sub_ps;
	for(x=0;x<width6;x+=6) // two RGB pairs
	{
		int R,G,B;
		// First pixel of the pair.
		RGBRGB = _mm_loadu_si128((__m128i *)sbase);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB1 = _mm_cvtps_epi32(rgb_ps);
		RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
		RGB1 = _mm_slli_si128(RGB1, 10);
		RGB1 = _mm_srli_si128(RGB1, 10);
		// Second pixel of the pair.
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB2 = _mm_cvtps_epi32(rgb_ps);
		RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
		RGB2 = _mm_slli_si128(RGB2, 6);
		// Merge the two processed pixels and re-insert the untouched tail.
		RGB1 = _mm_adds_epi16(RGB1, RGB2);
		RGB1 = _mm_slli_epi16(RGB1, 1);
		RGB1 = _mm_slli_si128(RGB1, 4);
		RGB1 = _mm_srli_si128(RGB1, 4);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		RGBRGB = _mm_slli_si128(RGBRGB, 12);
		RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);
		_mm_storeu_si128((__m128i *)sbase, RGBRGB);
		sbase += 6;
	}
#endif
}
// Amber/Blue anaglyph ghost-busting: reduces crosstalk between the amber
// (R,G) left-eye channels and the blue right-eye channel of one interleaved
// 16-bit RGB scanline, in place.
//
// decoder - supplies the lazily filled square-root lookup table
// sbase   - interleaved RGB pixels, 3 components per pixel, modified in place
// width   - pixel count
// ileakL  - 16-bit leak amount applied to the amber (R,G) channels
// ileakR  - 16-bit leak amount applied to the blue channel
//
// Components are reduced to 10-bit and squared (a cheap gamma-to-linear
// approximation), the opposing eye's squared signal is subtracted, and the
// result is mapped back through decoder->sqrttable, whose entries start at
// 65535 meaning "not yet computed".
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	unsigned short *table = decoder->sqrttable;
	unsigned short *pix = sbase;
	unsigned short *stop = sbase + 3*width;
	const int peak = 1024*1024 - 1;   // largest squared 10-bit value (20-bit)
	int leakL = ileakL >> 6;          // bring the 16-bit leaks down to 10-bit
	int leakR = ileakR >> 6;

	if (table == NULL)
		return;                       // no lookup table -- leave the line untouched

	for (; pix < stop; pix += 3)
	{
		// Squared 10-bit components (20-bit energies).
		int r2 = (pix[0] >> 6) * (pix[0] >> 6);
		int g2 = (pix[1] >> 6) * (pix[1] >> 6);
		int b2 = (pix[2] >> 6) * (pix[2] >> 6);

		// Blend each channel toward white by its leak and subtract the
		// opposing eye: blue opposes amber; the (r+g)/2 average opposes blue.
		int outR = (r2*(1023 - leakL) + leakL*peak - b2*leakL) >> 10;
		int outG = (g2*(1023 - leakL) + leakL*peak - b2*leakL) >> 10;
		int outB = (b2*(1023 - leakR) + leakR*peak - ((r2 + g2) >> 1)*leakR) >> 10;

		// Clamp back into the table's 20-bit index range.
		if (outR > peak) outR = peak;
		if (outR < 0)    outR = 0;
		if (outG > peak) outG = peak;
		if (outG < 0)    outG = 0;
		if (outB > peak) outB = peak;
		if (outB < 0)    outB = 0;

		// Fill square-root entries on first use (65535 marks an empty slot).
		if (table[outR] == 65535)
			table[outR] = (int)sqrt(outR);
		if (table[outG] == 65535)
			table[outG] = (int)sqrt(outG);
		if (table[outB] == 65535)
			table[outB] = (int)sqrt(outB);

		// Restore 16-bit output range.
		pix[0] = table[outR] << 6;
		pix[1] = table[outG] << 6;
		pix[2] = table[outB] << 6;
	}
}
// Green/Magenta anaglyph ghost-busting: reduces crosstalk between the
// magenta (R,B) channels and the green channel of one interleaved 16-bit
// RGB scanline, in place.
//
// decoder - supplies the lazily filled square-root lookup table
// sbase   - interleaved RGB pixels, 3 components per pixel, modified in place
// width   - pixel count
// ileakL  - 16-bit leak amount applied to the magenta (R,B) channels
// ileakR  - 16-bit leak amount applied to the green channel
//
// Components are reduced to 10-bit and squared (a cheap gamma-to-linear
// approximation), the opposing eye's squared signal is subtracted, and the
// result is mapped back through decoder->sqrttable, whose entries start at
// 65535 meaning "not yet computed".
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	const int fullscale = 1024*1024 - 1;   // largest squared 10-bit value (20-bit)
	unsigned short *roots = decoder->sqrttable;
	int leakM = ileakL >> 6;               // 10-bit leak for the magenta channels
	int leakG = ileakR >> 6;               // 10-bit leak for the green channel
	int i;

	if (roots == NULL)
		return;                            // no lookup table -- leave the line untouched

	for (i = 0; i < width; i++, sbase += 3)
	{
		int rs, gs, bs, vr, vg, vb;

		// Squared 10-bit components (20-bit energies).
		rs = sbase[0] >> 6;
		gs = sbase[1] >> 6;
		bs = sbase[2] >> 6;
		rs *= rs;
		gs *= gs;
		bs *= bs;

		// Blend each channel toward white by its leak and subtract the
		// opposing eye: green opposes magenta; (r+b)/2 opposes green.
		vr = (rs*(1023 - leakM) + leakM*fullscale - gs*leakM) >> 10;
		vg = (gs*(1023 - leakG) + leakG*fullscale - ((rs + bs) >> 1)*leakG) >> 10;
		vb = (bs*(1023 - leakM) + leakM*fullscale - gs*leakM) >> 10;

		// Clamp back into the table's 20-bit index range.
		if (vr < 0) vr = 0; else if (vr > fullscale) vr = fullscale;
		if (vg < 0) vg = 0; else if (vg > fullscale) vg = fullscale;
		if (vb < 0) vb = 0; else if (vb > fullscale) vb = fullscale;

		// Fill square-root entries on first use (65535 marks an empty slot).
		if (roots[vr] == 65535)
			roots[vr] = (int)sqrt(vr);
		if (roots[vg] == 65535)
			roots[vg] = (int)sqrt(vg);
		if (roots[vb] == 65535)
			roots[vb] = (int)sqrt(vb);

		// Restore 16-bit output range.
		sbase[0] = roots[vr] << 6;
		sbase[1] = roots[vg] << 6;
		sbase[2] = roots[vb] << 6;
	}
}
void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank)
{
uint16_t *scratchline,*scratchline2,*scratchline3;
uint16_t *sptr;
uint16_t *srclineA,*srclineB;
uint16_t *dstlineA,*dstlineB;
int x,y2;
int width = decoder->frame.width;
int height = decoder->frame.height;
int skip = 3;
int sskip = 3;
uint8_t *bptr1;
uint8_t *bptr2;
uint8_t *baseptr1;
uint8_t *baseptr2;
float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL;
float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR;
float frameTilt = decoder->cfhddata.channel[0].FrameTilt;
float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset;
float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset;
float rotOffset = decoder->cfhddata.channel[1].RotationOffset;
float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset;
float horizOffsetStep = 0;
float horizOffsetStepR = 0;
int flip1=0,flip2=0;
int channel_flip = decoder->cfhddata.channel_flip;
int source_pitch1 = source_pitch;
int source_pitch2 = source_pitch;
uint8_t *outputline = output+y*pitch;
uint8_t *outputline2 = NULL;
float horizOffsetBase;
float rotOffsetBase;
float horizOffsetBaseR;
float rotOffsetBaseR;
int formatdone = 0;
float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
float zoom;
float zoomR;
float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom;
float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom;
float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom;
float frameDiffZoom1 = decoder->cfhddata.channel[1].FrameDiffZoom;
float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom;
float frameHDynamic = decoder->cfhddata.FrameHDynamic;
float frameHDynCenter = decoder->cfhddata.FrameHDynCenter;
float frameHDynWidth = decoder->cfhddata.FrameHDynWidth;
float frameHScale = decoder->cfhddata.FrameHScale;
int alphachannel = 0;
int whitepoint = 16;
float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen;
float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen;
float vignette = decoder->cfhddata.channel[0].user_vignette_start;
int flip_LR = 0;
float vig_r1;
float vig_r2;
float vig_gain;
if(blank) // blankline, no shifts required
{
windowMaskL = 0;
windowMaskR = 0;
frameTilt = 0;
horizOffset = 0;
horizOffsetR = 0;
rotOffset = 0;
rotOffsetR = 0;
frameZoom1 = 1.0;
frameZoom2 = 1.0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
frameHScale = 1.0;
frameHDynamic = 1.0;
frameHDynCenter = 0.5;
frameHDynWidth = 0.0;
}
if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
alphachannel = 1;
if(xmax == 0.0) xmax = 1.0;
if(ymax == 0.0) ymax = 1.0;
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
width *= 2;
}
if(decoder->source_channels < 2) // 2D
{
channel_flip &= 0x3;
channel_flip |= channel_flip<<2;
decoder->cfhddata.channel_flip = channel_flip;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
{
blursharpenL = 0.0;
blursharpenR = 0.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
horizOffset = rotOffset = 0;
horizOffsetR = rotOffsetR = 0;
frameTilt = 0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
horizOffset += decoder->cfhddata.FrameOffsetX;
horizOffsetR -= decoder->cfhddata.FrameOffsetX;
frameZoom1 += frameHScale - 1.0f;
frameZoom2 += frameHScale - 1.0f;
if(frameHDynamic != 1.0)
{
frameZoom1 += 0.00001f;
frameZoom2 += 0.00001f;
}
if(vignette != 0.0)
{
float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width));
vig_r1 = (vignette+1.0f);
vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f);
vig_gain = decoder->cfhddata.channel[0].user_vignette_gain;
vig_r1 *= vig_diag;
vig_r2 *= vig_diag;
}
}
else
{
frameZoom1 = 1.0f;
frameZoom2 = 1.0f;
vignette = 0;
}
zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1;
if(frameDiffZoom2 != 0.0)
zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2;
else
zoomR = 0.0;
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
if(decoder->cfhddata.InvertOffset)
{
rotOffset = -rotOffset;
rotOffsetR = -rotOffsetR;
rotOffset -= decoder->cfhddata.FrameOffsetR;
rotOffsetR -= -decoder->cfhddata.FrameOffsetR;
}
else
{
rotOffset += decoder->cfhddata.FrameOffsetR;
rotOffsetR += -decoder->cfhddata.FrameOffsetR;
}
}
rotOffsetBase = rotOffset;
horizOffsetBase = horizOffset;
rotOffsetBaseR = rotOffsetR;
horizOffsetBaseR = horizOffsetR;
horizOffset -= rotOffset * 0.5f;
horizOffsetStep = rotOffset / (float)height;
horizOffsetR -= rotOffsetR * 0.5f;
horizOffsetStepR = rotOffsetR / (float)height;
horizOffset += horizOffsetStep * y;
horizOffsetR += horizOffsetStepR * y;
assert(bufferremain >= width * 8 * 2 * 2);
baseptr1 = source_buffer;
baseptr2 = source_buffer + channel_offset;
if(channel_flip & 0xf)
{
if(channel_flip & 1)
{
flip1 = 1;
}
if(channel_flip & 4)
{
flip2 = 1;
}
}
if(source_pitch1 < 0)
flip_LR = 1;
decoder->sharpen_flip = 0;
if(channel_flip & 2) //ProcessLine3D
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
}
else
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
}
if(channel_flip & 8)
{
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
baseptr1 += source_pitch1*(height-1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
else
{
baseptr2 += source_pitch2*(height-1);
source_pitch2 = -source_pitch2;
}
}
bptr1 = baseptr1 + y*source_pitch1;
bptr2 = baseptr2 + y*source_pitch2;
y2 = y;
if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView
{
if(y2 < height/4)
{
blank = 1;
y2 = 0;
}
else
{
y2 -= height/4;
y2 *= 2;
if(y2 >= height-1)
{
blank = 1;
y2 = height - 2;
}
}
bptr1 = baseptr1 + y2*source_pitch1;
bptr2 = baseptr2 + y2*source_pitch2;
}
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ;
if(alphachannel)
{
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ;
}
dstlineA = sptr = scratchline;
dstlineB = scratchline3;
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RG64:
whitepoint = 16;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_W13A:
whitepoint = 13;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_WP13:
whitepoint = 13;
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RG48:
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RGB32:
skip = 4;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
break;
}
if(blank)
{
if(srclineA)
memset(srclineA, 0, width*skip);
if(srclineB && decoder->channel_decodes > 1)
memset(srclineB, 0, width*skip);
}
if(blursharpenL != 0.0 || blursharpenR != 0.0)
{
if(decoder->channel_blend_type == BLEND_FREEVIEW ||
decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
decoder->channel_blend_type == BLEND_LINE_INTERLEAVED
)
{
decoder->doVerticalFilter = 0;
}
else
{
decoder->doVerticalFilter = 1;
}
}
{
switch(decoder->channel_blend_type)
{
case BLEND_FREEVIEW:
case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side
if(!blank)
{
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
dstlineA = srclineA;
sptr = dstlineA;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2);
}
else
{
int16_t *ptr;
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(!alphachannel)
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else
{
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
int cwidth= width/2;
if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth= width;
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
dstlineA = srclineA;
ptr = (int16_t *)srclineA;
for(x=0; x<width/2; x++)
{
*ptr++ = (ptr1[0]+ptr1[3])>>1;
*ptr++ = (ptr1[1]+ptr1[4])>>1;
*ptr++ = (ptr1[2]+ptr1[5])>>1 ;
ptr1+=sskip*2;
}
for(; x<width; x++)
{
*ptr++ = (ptr2[0]+ptr2[3])>>1;
*ptr++ = (ptr2[1]+ptr2[4])>>1;
*ptr++ = (ptr2[2]+ptr2[5])>>1;
ptr2+=sskip*2;
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, dstlineA, width/2, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2);
memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2);
memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2);
}
}
break;
case BLEND_STACKED_ANAMORPHIC: //stacked
case BLEND_LINE_INTERLEAVED: //fields
if((y & 1) == 1) return;
if(!blank)
{
uint16_t *ptrA1 = (uint16_t *)srclineA;
uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1);
uint16_t *ptrB1 = (uint16_t *)srclineB;
uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1);
FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip);
FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip);
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(decoder->doVerticalFilter == 0)
{
if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline2 = output+(y>>1)*pitch;
outputline = output+((y>>1)+(height/2))*pitch;
}
else
{
outputline = output+(y>>1)*pitch;
outputline2 = output+((y>>1)+(height/2))*pitch;
}
}
else //fields
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline = output+(y)*pitch;
outputline2 = output+(y+1)*pitch;
}
else
{
outputline2 = output+(y)*pitch;
outputline = output+(y+1)*pitch;
}
}
if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right
{
uint8_t *tmp = outputline2;
outputline2 = outputline;
outputline = tmp;
}
}
else
{
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2, srclineA, width*skip);
memcpy(srclineA, srclineB, width*skip);
memcpy(srclineB, scratchline2, width*skip);
}
}
}
break;
case BLEND_ONION: //onion
case BLEND_DIFFERENCE: //difference
case BLEND_SPLITVIEW: //splitView
if(!blank)
{
//dstlineA = source_buffer;
//dstlineA += (source_pitch>>1) * y;
sptr = dstlineA = srclineA;
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
x = 0;
if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view
{
int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255;
for(x = xsplit*sskip; x<width*sskip; x++)
{
srclineA[x] = srclineB[x];
}
}
else if(decoder->channel_blend_type == BLEND_ONION) //onion
{
FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip);
}
else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference
{
#if XMMOPT
int width8 = (width*sskip) & 0xfff8;
__m128i mid_epi16;
//int unaligned = ((int)sbase) & 15;
//unaligned += ((int)in_rgb8) & 15;
if(whitepoint == 13)
mid_epi16 = _mm_set1_epi16(0x0fff);
else
mid_epi16 = _mm_set1_epi16(0x1fff);
for(x=0; x<width8; x+=8)
{
__m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]);
__m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]);
// 0 to 0xffff
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff
}
else
{
rgb16A = _mm_subs_epi16(rgb16A, rgb16B);
}
rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff
_mm_store_si128((__m128i *)&dstlineA[x], rgb16A);
}
#endif
for(; x<width*sskip; x++)
{
int val;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
val = (srclineB[x] - srclineA[x]) + 32768;
}
else
{
val = (srclineA[x] - srclineB[x]) + 32768;
}
if(val > 0x7fff) val = 0x7fff;
if(val < 0) val = 0;
dstlineA[x] = val;
}
}
}
break;
case BLEND_ANAGLYPH_RC:
case BLEND_ANAGLYPH_RC_BW:
case BLEND_ANAGLYPH_AB:
case BLEND_ANAGLYPH_AB_BW:
case BLEND_ANAGLYPH_GM:
case BLEND_ANAGLYPH_GM_BW:
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
uint16_t *sptr1 = scratchline2;
uint16_t *sptr2 = scratchline3;
dstlineA = (uint16_t *)bptr1;
// dstlineA += (source_pitch>>1) * y;
sptr = dstlineA;
sptr1 = srclineA = (uint16_t *)bptr1;
sptr2 = srclineB = (uint16_t *)bptr2;
if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if(!alphachannel)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if(xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if(xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
uint16_t *tmp = srclineA;
srclineA = srclineB;
srclineB = tmp;
}
switch(decoder->channel_blend_type)
{
case BLEND_ANAGLYPH_RC:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_RC_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if(decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for(x=0; x<width; x++)
{
int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4;
int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
int r,g,b;
for(x=0; x<width; x++)
{
r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000;
g =(ptr1[0]*-40 + ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000;
b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000;
if(r<0) r=0; if(r>0x3fff) r=0x3fff;
if(g<0) g=0; if(g>0x3fff) g=0x3fff;
if(b<0) b=0; if(b>0x3fff) b=0x3fff;
sptr[0] = r;
sptr[1] = g;
sptr[2] = b;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
break;
}
}
break;
case BLEND_NONE:
default:
if(decoder->channel_decodes == 1) // only one channel
{
if(skip == 8)
{
//the data is already in the correct format
sptr = (unsigned short *)bptr1;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else if(skip == 6)
{
//the data is already in the correct format
dstlineA = sptr = (unsigned short *)srclineA;
// shift if needed.
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(decoder->channel_current == 0)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
if(vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
}
if(decoder->channel_current == 0)
{
if(blursharpenL != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
}
}
else
{
if(blursharpenR != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip);
}
}
}
if ((windowMaskL && decoder->channel_current == 0) || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
if(decoder->channel_current != 0) mask = xmin;
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
RGB48WindowMask(decoder, srclineA, width, 0, mask);
}
if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
if(decoder->channel_current != 1) mask = (1.0f-xmax);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR);
RGB48WindowMask(decoder, srclineA, width, 1, mask);
}
}
else
{
outputline2 = output+(y+height)*pitch;
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
else
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
if(windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if(windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
}
if(windowMaskR || (1.0-xmax))
{
float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if(windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
}
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
}
break;
}
}
if(!formatdone)
{
int flags = ACTIVEMETADATA_PRESATURATED;
int whitebitdepth = 16;
if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
{
flags = 0;
whitebitdepth = 13;
}
if(outputline2)
{
// if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
else
{
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
//{
// if(alphachannel)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth);
// else
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
//}
if(decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if(alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
}
}
/*
 * SharpenLine
 *
 * Runs the optional vertical blur/sharpen filter over one scan line of the
 * stereo working buffer, then converts that line into the caller's output
 * format.  Driven per line (y), potentially by several worker threads;
 * 'thread_index' selects a private scratch line inside 'buffer' so threads
 * do not collide.
 *
 * Parameters:
 *   decoder        - decoder state (formats, per-channel tuning, flips,
 *                    blend type)
 *   buffer         - scratch pool; this call uses width*skip bytes at byte
 *                    offset width*skip*thread_index
 *   bufferremain   - not used by this routine (kept for call-site symmetry)
 *   output, pitch  - destination image base pointer and row stride in bytes
 *   local_output   - intermediate image holding the decoded line(s), row
 *                    stride 'local_pitch' bytes; local_pitch == 0 disables
 *                    the sharpen pass
 *   channel_offset - byte offset to the other eye within a row, used by the
 *                    line-interleaved blend layout
 *   y              - scan line to process
 *   thread_index   - worker index used only for scratch-line selection
 */
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
	uint16_t *sbase;//*sbase2 = NULL;
	int width = decoder->frame.width;
	int height = decoder->frame.height;
	int skip = 3;	// bytes per pixel of the stereo buffer format (set by the switch below)
	//int flip1=0;//flip2=0;
	int channel_flip = decoder->cfhddata.channel_flip;
	//int local_pitch1 = local_pitch;
	//int local_pitch2 = local_pitch;
	uint8_t *outputline = output+y*pitch;
	//uint8_t *outputline2 = NULL;
	short *scratch;
	//int formatdone = 0;
	//float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
	//float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
	//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
	//float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
	int alphachannel = 0;
	float blursharpen = 0;
	int line_max = decoder->frame.height;
	int yy = y;

	// Sharpen amount comes from channel[1] when the current channel is 0,
	// otherwise channel[2] (per the TODO, per-eye control is not finished).
	if(decoder->channel_current == 0)
		blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
	else
		blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

	// Sharpening is disabled when the color-matrix stage is off or when
	// decoding at the reduced resolutions listed here.
	if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
		decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
	{
		blursharpen = 0.0;
	}

	if(decoder->channel_mix_half_res == 1)
		line_max *= 2;

	if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
	{
		channel_flip = 0;
	}

	// Vertical flip: write this line mirrored about the frame midline.
	if(decoder->sharpen_flip) //SharpenLine
	{
		//if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
		{
			yy = (line_max - 1 - y);
			outputline = output+yy*pitch;
		}
	}

	// Formats carrying an alpha plane use the 4444 conversion path below.
	if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
		decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
		alphachannel = 1;

	if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
	{
		width *= 2;
	}

	sbase = (uint16_t *)local_output;
	sbase += (local_pitch>>1) * y;	// local_pitch is in bytes; sbase steps in 16-bit units

	// Bytes per pixel for the stereo buffer format.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_W13A:
		skip = 8;
		break;
	case DECODED_FORMAT_WP13:
		skip = 6;
		break;
	case DECODED_FORMAT_RG48:
		skip = 6;
		break;
	case DECODED_FORMAT_RGB32:
		skip = 4;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		break;
	}

	// Private scratch line for this worker thread.
	scratch = (short*)(buffer + width * skip * thread_index);

	{
		int flags = ACTIVEMETADATA_PRESATURATED;
		int whitebitdepth = 16;

		// The sharpen filter itself runs only for the 13-bit working formats.
		if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
		{
			int use_pitch = local_pitch;
			int edgeclose = 0;	// set near top/bottom so the filter clamps its taps
			flags = 0;
			whitebitdepth = 13;

			if(blursharpen != 0.0 && local_pitch != 0)
			{
				// Five source rows centered on this line (A..E); taps are
				// clamped to the current row near the frame edges.  The row
				// step depends on how the two eyes are laid out in memory.
				short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;
				switch(decoder->channel_blend_type)
				{
				case BLEND_STACKED_ANAMORPHIC:
					// One eye occupies every other buffer row: step by 2 rows.
					sbase = (uint16_t *)local_output;
					sbase += (local_pitch>>1) * y * 2;
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;
				case BLEND_LINE_INTERLEAVED:
					// Odd lines map back to the preceding even line; even
					// lines add channel_offset to reach the other eye.
					// NOTE(review): 'y' is decremented here and the later
					// output conversion uses the modified value — intentional
					// per the interleave layout, but verify against callers.
					sbase = (uint16_t *)local_output;
					if(y & 1)
					{
						y--;
						sbase += (local_pitch>>1) * y;
					}
					else
					{
						sbase += (local_pitch>>1) * y;
						sbase += channel_offset>>1;
					}
					if(y<=8) edgeclose = 1;
					if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4; else Aptr = (short *)sbase;
					if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2; else Dptr = (short *)sbase;
					if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4; else Eptr = (short *)sbase;
					if(y>=height-8) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;
				default:
					// Progressive layout: taps on adjacent buffer rows.
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2; else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1; else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1; else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2; else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch;
					break;
				}

				if(skip == 8)
				{
					FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}
				else
				{
					FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution,
						decoder->channel_blend_type);
				}
				sbase = (uint16_t *)scratch;	// conversion below reads the filtered line
			}
		}

		// Emit the (possibly filtered) line in the requested output format.
		if(alphachannel)
			Convert4444LinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
		else
			ConvertLinesToOutput(decoder, width, 1, y, sbase,
				outputline, pitch, decoder->frame.format, whitebitdepth, flags);
	}
}
#if _GRAPHICS
/*
 * PaintFrame
 *
 * Draws burn-in content (histogram / waveform / vectorscope / grid, safe
 * markers, metadata overlay objects) on top of a fully decoded frame.
 * Returns immediately when no burn-in flags are set.  When analysis tools
 * are requested, histogram data is first gathered by the worker-thread pool
 * (JOB_TYPE_HISTOGRAM), sampling a subset of scan lines on large frames.
 * The tools state is zeroed at the end so the next frame starts fresh.
 *
 * decoder       - decoder instance owning the tools state and thread pools
 * output, pitch - frame base pointer and row stride in bytes
 * output_format - pixel format of 'output'
 */
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x,y,v,width, height;
	int maxR=0,maxG=0,maxB=0;
	width = decoder->frame.width;
	height = decoder->frame.height;

	if(decoder->cfhddata.BurninFlags == 0)
		return;

	// Lazily allocate the tools (analysis) state the first time any
	// tool besides the overlay bit is requested.
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
	{
		if(decoder->tools == NULL)
		{
#if _ALLOCATOR
			decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
			decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
			if(decoder->tools)
			{
				memset(decoder->tools, 0, sizeof(ToolsHandle));
			}
			else
			{
				return;	// allocation failed: nothing can be drawn safely
			}
		}
	}

	decoder->frame.output_format = output_format;

#if _THREADED && 1
	// Gather histogram/scope data with the worker-thread pool before drawing.
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;
#if _DELAY_THREAD_START
		if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);

			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
				decoder->thread_cntrl.capabilities >> 16/*cpus*/,
				WorkerThreadProc,
				decoder);
		}
#endif
		{
			int avgR=0,avgG=0,avgB=0;

			// Post a message to the mailbox.  Large frames are subsampled
			// vertically to keep the analysis pass cheap.
			mailbox->output = output;
			if(height >= 1080)
			{
				mailbox->pitch = pitch*4; // only read every 4th scan line
				workunits = height/4; // only read every 4th scan line
			}
			else if(height >= 540)
			{
				mailbox->pitch = pitch*2; // only read every 2th scan line
				workunits = height/2; // only read every 2th scan line
			}
			else
			{
				mailbox->pitch = pitch; // read every scan line
				workunits = height; // read every scan line
			}

			if(decoder->tools->histogram == 0)
			{
				mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}

			// Average the 256-bin histograms; the draw scale (maxR/G/B) is
			// set to three times the average bin height rather than the peak.
			for(x=0;x<256;x++)
			{
				avgR += decoder->tools->histR[x];
				avgG += decoder->tools->histG[x];
				avgB += decoder->tools->histB[x];
				//if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
				//if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
				//if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
			}
			avgR /= 256;
			avgG /= 256;
			avgB /= 256;
			//maxR++;
			//maxG++;
			//maxB++;

			decoder->tools->maxR = avgR*3;//maxR;
			decoder->tools->maxG = avgG*3;//maxG;
			decoder->tools->maxB = avgB*3;//maxB;
		}
	}
#endif

	// Render every requested burn-in element, then composite to the frame.
	if(decoder->cfhddata.BurninFlags && DrawOpen(decoder))
	{
		if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
		{
#if _THREADED
			//DrawInit(decoder);
			//DrawStartThreaded(decoder);
			if(decoder->draw_thread.pool.thread_count > 0)
			{
				DrawWaitThreaded(decoder);	// overlay drawing already in flight
			}
			else
#endif
			{
				DrawInit(decoder);
				DrawMetadataObjects(decoder);
			}
		}
		else
		{
			DrawInit(decoder);
		}

		if(decoder->drawSafeMarkers)
			DrawSafeMarkers(decoder);

		if(decoder->cfhddata.BurninFlags & 2) // tools
		{
			if(decoder->tools)
			{
				// ComputeFlags bits: 16 = grid, 2 = histogram,
				// 4 = waveform, 8 = vectorscope.
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16)
					DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2)
					DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4)
					DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/);
				if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8)
					DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/);
			}
		}

		DrawScreen(decoder, output, pitch, output_format);
	}

#if 0
#if _THREADED && 1
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;
		int targetW, targetH;

		if(width < 256 || height < 256)
			return;

		targetW = width / 4;
		targetH = height / 8;

		mailbox->output = output;
		mailbox->pitch = pitch;
		workunits = targetW;
		mailbox->jobType = JOB_TYPE_BURNINS; // burnin

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	if(decoder->histogram == 0)
	{
		for(y=0; y<height; y+=4)
		{
			uint8_t *bptr = output;
			bptr += pitch * y;

			HistogramLine(decoder, (unsigned short *)bptr, width, output_format);
			if(decoder->histogram == 0)
				return; // don't know how to create Histogram for that format
		}
	}

	for(x=1;x<255;x++)
	{
		if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
		if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
		if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
	}
	maxR++;
	maxG++;
	maxB++;
	decoder->maxR = maxR;
	decoder->maxG = maxG;
	decoder->maxB = maxB;

	for(x=0; x<targetW; x++)
	{
		HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH);
	}
#endif
#endif

	// Reset the analysis state so the next frame accumulates fresh data.
	if(decoder->tools)
		memset(decoder->tools, 0, sizeof(ToolsHandle));
}
#endif
extern int geomesh_alloc_cache(void *gm);
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)
/*
 * Returns true when x and y match to within one unit after coarse
 * quantization.  The quantization step grows with the magnitude of y
 * (>>6 above 1080, >>5 above 540, >>4 otherwise), so larger dimensions
 * tolerate proportionally larger absolute differences.
 */
bool approx_equal(int x, int y)
{
	int shift;
	int qx, qy, delta;

	if (y > 1080)
		shift = 6;
	else if (y > 540)
		shift = 5;
	else
		shift = 4;

	qx = x >> shift;
	qy = y >> shift;
	delta = qx - qy;

	return (delta >= -1 && delta <= 1);
}
/*
 * Returns true when y lies within about +/-1% of x.
 *
 * Fixes two defects of the previous version, which tested
 * (x*0.99 < y && y < x*1.01) directly:
 *   - for x < 0 the bounds were inverted (x*0.99 > x*1.01), so the open
 *     interval was empty and equal negative values compared unequal;
 *   - x == y == 0 returned false because the interval (0, 0) is empty.
 * All inputs that previously compared true (positive x within tolerance)
 * still compare true, so the change is backward compatible.
 */
bool approx_equal_float(float x, float y)
{
	double lo = x * 0.99;	// double math, matching the original constants
	double hi = x * 1.01;

	if (x == y)
		return true;	// exact match, including zero and negatives

	if (lo > hi)	// x < 0: swap so (lo, hi) is a proper interval
	{
		double t = lo;
		lo = hi;
		hi = t;
	}

	return (lo < y && y < hi);
}
#if WARPSTUFF
/*
 * WarpFrame
 *
 * Applies lens-correction / reprojection warping to a decoded frame.
 * When any lens parameter differs from the values cached in decoder->last*,
 * the warp mesh is rebuilt: the source lens model and mesh density are
 * chosen from the frame aspect ratio (approx. 2:1 equirect, 4:3, or 16:9),
 * the requested transforms (rotate, scale, defish, repoint, pan) are
 * composed into the mesh, and the mesh cache is precomputed by the worker
 * threads (JOB_TYPE_WARP_CACHE).  The cached mesh is then applied to the
 * frame (JOB_TYPE_WARP, plus an optional vertical blur pass over filled
 * background areas) into decoder->lens_correct_buffer, which is finally
 * copied back over 'output'.
 *
 * Does nothing unless cfhddata->doMesh is set.  On mesh-init failure the
 * function returns without touching the frame.
 */
void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int width, height;
	//int maxR = 0, maxG = 0, maxB = 0;
	int status = WARPLIB_SUCCESS;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	int backgroundfill = cfhddata->lensFill;
	float sensorcrop = 1.0;
	float phi, theta, rho;
	int srcLens = HERO4;

	if (!cfhddata->doMesh) return;

	// Rebuild the mesh only when a lens parameter actually changed.
	if (decoder->lastLensOffsetX != cfhddata->LensOffsetX ||
		decoder->lastLensOffsetY != cfhddata->LensOffsetY ||
		decoder->lastLensOffsetZ != cfhddata->LensOffsetZ ||
		decoder->lastLensOffsetR != cfhddata->LensOffsetR ||
		decoder->lastLensZoom != cfhddata->LensZoom ||
		decoder->lastLensFishFOV != cfhddata->LensFishFOV ||
		decoder->lastLensGoPro != cfhddata->lensGoPro ||
		decoder->lastLensSphere != cfhddata->lensSphere ||
		decoder->lastLensFill != cfhddata->lensFill ||
		decoder->lastLensStyleSel != cfhddata->lensStyleSel ||
		memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) ||
		memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
	{
		if (decoder->mesh)
			geomesh_destroy(decoder->mesh);

		width = decoder->frame.width;
		height = decoder->frame.height;

		// Choose source lens model, mesh density and repoint angles from the
		// frame aspect ratio.
		if (approx_equal(width, height * 2)) // approx. 2:1
		{
			float outputaspect = 16.0f/9.0f;
			srcLens = EQUIRECT;
			sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
			if (cfhddata->lensCustomSRC[1])
			{
				outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
				if (outputaspect >= 1.0f && outputaspect <= 3.0f)
				{
					//float sourceratio = (float)width / (float)height;
					if (approx_equal_float(outputaspect, 4.0f / 3.0f))
						sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
					if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
						sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
				}
			}

			// Mesh density scales with frame width.
			if (width >= 2496)
				decoder->mesh = geomesh_create(199, 99);
			else if (width >= 1272)
				decoder->mesh = geomesh_create(99, 49);
			else
				decoder->mesh = geomesh_create(49, 25);

			phi = cfhddata->LensOffsetX * DEG2RAD(720.0f); // +-180deg HFOV for 2:1
			theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else if (approx_equal(width * 3, height * 4)) // approx. 4:3
		{
			srcLens = HERO4;
			sensorcrop = 1.0;
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);

			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else //if(approx_equal(width*9,height*16)) // approx. 16:9
		{
			srcLens = HERO4;
			sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);

			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60.1deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}

		// Source and destination share size/pitch/format: warp is in place
		// (conceptually), with the warplib format matching output_format.
		if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
		else
			assert(0);	// unsupported output format for warping

		if (cfhddata->lensSphere == 1)
		{
			// Spherical source: compose rotate/scale/defish (unless the
			// destination is equirect) and then repoint between lens models.
			if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
			{
				if (cfhddata->LensOffsetR != 0.0)
				{
					//float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					if (cfhddata->LensOffsetR < 0.0) angle = -angle;
					geomesh_transform_rotate(decoder->mesh, angle);
				}
				if (cfhddata->LensZoom != 1.0)
					geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

				if (cfhddata->LensFishFOV != 0.0) // DeFish
				{
					float fov = cfhddata->LensFishFOV;// *180.0;
					if (fov > 89.9f) fov = 89.9f;
					if (fov < -89.9f) fov = -89.9f;
					if (fov)
						status |= geomesh_transform_defish(decoder->mesh, fov);
				}
			}

			// lensGoPro selects the destination lens model.
			switch (cfhddata->lensGoPro)
			{
			case 0: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR); break;
			case 1: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4); break;
			case 2: geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT); break;
			case 4:
				geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
				if (srcLens == EQUIRECT) geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
				else geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
				break;
			}
		}
		else // old boring geometry
		{
			// Planar (non-spherical) path: simple zoom/pan/rotate, with an
			// optional GoPro-to-rectilinear correction.
			if (cfhddata->LensZoom != 1.0)
				geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

			// basic orthographic moves
			if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
				geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);

			if (cfhddata->LensOffsetR != 0.0)
			{
				float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
				geomesh_transform_rotate(decoder->mesh, angle);
			}

			if (cfhddata->lensGoPro == 0) //Rectilear
				status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);

			//status |= geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
			//	width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
		}

		geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE

		if (status == WARPLIB_SUCCESS)
		{
			// Destination buffer for the warped frame (one full frame).
			if (decoder->lens_correct_buffer == NULL)
			{
#if _ALLOCATOR
				decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
				decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
			}
		}
		else
		{
			return;	// mesh setup failed: leave the frame unwarped
		}

		/* need resources?
		{
			if(decoder->tools == NULL)
			{
#if _ALLOCATOR
				decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
				decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
				if(decoder->tools)
				{
					memset(decoder->tools, 0, sizeof(ToolsHandle));
				}
				else
				{
					return;
				}
			}
		}
		*/

#if _THREADED && 1
		// Precompute the mesh cache in parallel (chunks of 16 lines).
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
			int workunits = decoder->frame.height;
#if _DELAY_THREAD_START
			if (decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
					decoder->thread_cntrl.capabilities >> 16,
					WorkerThreadProc,
					decoder);
			}
#endif
			{
				// Post a message to the mailbox
				mailbox->data = decoder->mesh;
				mailbox->output = output;
				mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
				mailbox->line_max = decoder->frame.height;
				mailbox->chunk_size = 16;
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
				mailbox->jobType = JOB_TYPE_WARP_CACHE;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
		}
#endif

		//decoder->frame.output_format = output_format;

		// Remember the parameters used for this mesh so the next frame can
		// skip the rebuild when nothing changed.
		decoder->lastLensOffsetX = cfhddata->LensOffsetX;
		decoder->lastLensOffsetY = cfhddata->LensOffsetY;
		decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
		decoder->lastLensOffsetR = cfhddata->LensOffsetR;
		decoder->lastLensZoom = cfhddata->LensZoom;
		decoder->lastLensFishFOV = cfhddata->LensFishFOV;
		decoder->lastLensGoPro = cfhddata->lensGoPro;
		decoder->lastLensSphere = cfhddata->lensSphere;
		decoder->lastLensFill = cfhddata->lensFill;
		decoder->lastLensStyleSel = cfhddata->lensStyleSel;
		memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
		memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
	}

#if _THREADED && 1
	// Apply the cached mesh to this frame (and optionally blur filled-in
	// background areas vertically).
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits = decoder->frame.height;

		mailbox->data = decoder->mesh;
		mailbox->output = output;
		mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
		mailbox->line_max = decoder->frame.height;
		mailbox->chunk_size = 16;
		workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
		mailbox->jobType = JOB_TYPE_WARP;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

		if(backgroundfill) // may need to blur the filled in areas
		{
			mailbox->data = decoder->mesh;
			mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->line_max = decoder->frame.width;	// vertical pass works per column
			mailbox->chunk_size = 16;
			mailbox->pitch = pitch;
			workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
			mailbox->jobType = JOB_TYPE_WARP_BLURV;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
	}
#else // not threading
	{
		//geomesh_cache_init_bilinear(decoder->mesh); //bad
		geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
		geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
	}
#endif

	// Copy the warped result back over the caller's frame.
	memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);

	/*
	if(lens_correct_buffer)
#if _ALLOCATOR
		Free(decoder->allocator, lens_correct_buffer);
#else
		MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif

	geomesh_destroy(mesh);
	*/
}
/*
 * MaskFrame
 *
 * Blanks the frame regions outside the lens crop rectangle
 * (LensXmin/Xmax/Ymin/Ymax, given as fractions of the frame).  RGB formats
 * are cleared to zero; packed YUV formats are filled with the appropriate
 * black luma/chroma byte pair.  Does nothing when doMesh is off or the crop
 * rectangle is degenerate (all zeros) or covers the full frame.
 *
 * NOTE(review): minX/maxX are computed from 'pitch' (bytes), not from pixel
 * positions, then rounded down to a multiple of 4 via '0xfffc &' — verify
 * this matches the intended per-format pixel alignment.
 * NOTE(review): COLOR_FORMAT_YU64 sets bitsize = 16 but only an 8-bit fill
 * loop exists below, so YU64 frames are never masked here — looks like an
 * unfinished path; confirm before relying on it.
 * NOTE(review): after the FORMATRGB branch, bitsize is still 8 and
 * fillA/fillB are 0, so the 8-bit loop re-clears the same regions with
 * zeros — redundant work but the same result.
 */
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x, y, width, height;
	int minY, maxY;
	int minX, maxX;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	uint8_t *line = output;
	uint32_t fillA = 0;
	uint32_t fillB = 0;
	int bitsize = 8;

	if (!cfhddata->doMesh) return;

	width = decoder->frame.width;
	height = decoder->frame.height;

	// Degenerate (all zero) or full-frame crop: nothing to mask.
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0) return;
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0) return;

	// Convert the fractional crop rectangle to row and byte offsets.
	minY = (int)(decoder->cfhddata.LensYmin*(float)height);
	maxY = (int)(decoder->cfhddata.LensYmax*(float)height);
	minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
	maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

	if (FORMATRGB(output_format))
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
		// Left and Right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				memset(line, 0, minX);
				memset(line + maxX, 0, pitch - maxX);
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
	}
	else
	{
		// Packed YUV: pick the byte pair that represents black for the
		// specific component ordering of the format.
		switch (output_format & 0x7fffffff)
		{
		case COLOR_FORMAT_YVYU:
		case COLOR_FORMAT_YUYV:
			fillA = 0x10;	// luma black
			fillB = 0x80;	// chroma neutral
			break;
		case COLOR_FORMAT_UYVY:
		case COLOR_FORMAT_2VUY:
			fillA = 0x80;
			fillB = 0x10;
			break;
		case COLOR_FORMAT_YU64:
			fillA = 0x8000;
			fillB = 0x1000;
			bitsize = 16;	// NOTE(review): no 16-bit fill loop below
			break;
		}
	}

	if (bitsize == 8)
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
		// Left and Right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				for (x = 0; x < minX; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				for (x = maxX; x < pitch; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
	}
}
#endif //#if WARPSTUFF
// Convert the decoder's intermediate (local) image into the caller's output
// frame buffer, applying any 3D/stereo channel blending plus orientation and
// framing corrections (zoom, keystone, vertical/rotation offsets) on the way.
//
// Parameters:
//   decoder        - decoder state; cfhddata processing flags select which
//                    corrections run, frame holds dimensions/format
//   output         - destination frame buffer
//   pitch          - destination row pitch in bytes
//   output_format  - requested output pixel format
//   local_output   - intermediate decoded image (decoder->StereoBuffer is
//                    used instead when present)
//   local_pitch    - row pitch of the intermediate image in bytes
//   channel_offset - byte offset between the two stereo channels in the
//                    intermediate image (negative means channels swapped)
//
// Fixes applied (review): the non-threaded #else path had an unbalanced
// 'if(' condition (missing one closing parenthesis) and referenced
// emptyFrameMask whose declaration had been commented out — both repaired.
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
uint8_t *local_output_double = local_output;
if(decoder->StereoBuffer)
local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;
if(channel_offset < 0) // channel swapped
{
channel_offset = -channel_offset;
}
// If the decoded format and the requested output format disagree on row
// order, walk the intermediate image bottom-up by negating its pitch.
if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
{
local_output += local_pitch*(decoder->frame.height-1);
if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
else
local_output_double = local_output;
local_pitch = -local_pitch;
}
// Vertical offsets are mirrored for flipped-color / inverted formats.
if(FLIPCOLORS(output_format) || output_format & 0x80000000)
{
decoder->cfhddata.InvertOffset = 1;
}
else
{
decoder->cfhddata.InvertOffset = 0;
}
decoder->frame.format = output_format;
//decoder->frame.colorspace = COLOR_SPACE_CG_601;
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Run the vertical 3D pass only when some orientation or framing
// correction is actually active; otherwise skip straight to the
// horizontal pass below.
if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[2].FrameTilt))
||
((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
(decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
// decoder->cfhddata.FrameOffsetX || ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
{
//int x;
int xbytes, xstep;
//uint8_t *base = local_output;
int width, height, chunk_size;
int fine_vertical = 0;
width = decoder->frame.width;
height = decoder->frame.height;
// xbytes = row length in bytes; xstep = column stride (bytes) each
// work unit advances per iteration of the vertical pass.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xbytes = width*4;
xstep = 16;
break;
case DECODED_FORMAT_RGB24:
xbytes = width*3;
xstep = 16;
break;
case DECODED_FORMAT_YUYV:
xbytes = width*2;
xstep = 16;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
xbytes = width*8;
xstep = 32;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
xbytes = width*6;
xstep = 32;
break;
default:
assert(0);
break;
}
// Choose scheduling granularity: coarse chunks when no rotation or
// keystone is in play, fine-grained (and a tighter xstep) when the
// correction angle is large enough to need per-column accuracy.
if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
(decoder->cfhddata.channel[1].RotationOffset == 0.0 && decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
decoder->cfhddata.channel[2].RotationOffset == 0.0 && decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
decoder->cfhddata.FrameOffsetR == 0.0))
{
chunk_size = 8;
}
else
{
chunk_size = 1;
if((fabs(decoder->cfhddata.channel[1].RotationOffset) +
fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) +
fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
(fabs(decoder->cfhddata.channel[2].RotationOffset) +
fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) +
fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
{
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xstep = 4;
break;
case DECODED_FORMAT_RGB24:
xstep = 3;
break;
case DECODED_FORMAT_YUYV:
xstep = 4;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
xstep = 8;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
xstep = 6;
break;
}
fine_vertical = 1;
}
}
if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
(decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
decoder->codec.progressive == false)
{
// Interlaced source: process each field separately at double pitch
// so the vertical corrections do not mix the two fields.
int interlaced_pitch = local_pitch * 2;
uint8_t *field2_output = local_output + local_pitch;
// Post a message to the mailbox
mailbox->local_output = local_output;
mailbox->local_pitch = interlaced_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->info.height >>= 1;
mailbox->line_max = (xbytes + xstep-1)/xstep;
mailbox->chunk_size = chunk_size;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
// Post a message to the mailbox (second field)
mailbox->local_output = field2_output;
mailbox->local_pitch = interlaced_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->info.height >>= 1;
mailbox->chunk_size = chunk_size;
mailbox->line_max = (xbytes + xstep-1)/xstep;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
else
{
//TODO Lens correct here.
//call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
// JOB_TYPE_HORIZONTAL_3D
//before doing any offset and rotation corrections.
if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
width /= 2;
// Post a message to the mailbox
mailbox->local_output = local_output;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = chunk_size;
mailbox->line_max = (xbytes + xstep-1)/xstep;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
// Horizontal pass: always runs — handles the 3D blend, horizontal and
// vertical flips, and the final copy into 'output'.
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->local_output = local_output;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = 16;
mailbox->line_max = decoder->frame.height;
if(decoder->channel_mix_half_res == 1)
mailbox->line_max *= 2;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
decoder->doVerticalFilter = 0;
mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
// Optional sharpening pass, requested by the horizontal pass above via
// decoder->doVerticalFilter.
if(decoder->doVerticalFilter)
{
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->local_output = local_output_double;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = 16;
mailbox->line_max = decoder->frame.height;
if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
mailbox->line_max *= 2;
if(decoder->channel_mix_half_res == 1)
mailbox->line_max *= 2;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
#else
// Single-threaded fallback: do the vertical corrections column by column,
// then emit each output line with ProcessLine3D.
{
int y,width, height;
uint8_t scratch[4096*16];
int scratchremain = 4096*16;
int ymin = 0, ymax;
// Restored declaration (was commented out at function scope): an all-zero
// region used below to detect whether a FrameMask has been set.
Frame_Region emptyFrameMask = {0};
width = decoder->frame.width;
height = decoder->frame.height;
ymax = height;
if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
{
ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
}
// NOTE(review): final ')' on the FrameZoom line below was missing in the
// original, leaving the 'if(' unbalanced (compare the _THREADED twin above).
if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset))
||
((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
(decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0)))
{
int x,xbytes, xstep;
uint8_t *base = local_output;
float voffsetstep;
float voffset = decoder->cfhddata.channel[1].VerticalOffset;
float roffset = decoder->cfhddata.channel[1].RotationOffset;
float voffset1, voffset2;
float voffsetstep1, voffsetstep2;
int channel_flip = decoder->cfhddata.channel_flip;
int aspectx,aspecty;
float aspectfix;
GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);
if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
voffset = roffset = 0;
}
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
voffset += decoder->cfhddata.FrameOffsetY;
if(decoder->cfhddata.InvertOffset)
{
voffset = -voffset;
roffset = -roffset;
}
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xbytes = width*4;
xstep = 16;
break;
case DECODED_FORMAT_RGB24:
xbytes = width*3;
xstep = 16;
break;
case DECODED_FORMAT_YUYV:
xbytes = width*2;
xstep = 16;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
xbytes = width*6;
xstep = 32;
break;
}
//DAN20100923 -- simplified
//voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
//voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
voffset += roffset * aspectfix * 0.5;
voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);
if(roffset == 0.0)
xstep = xbytes;
voffset1 = voffset2 = voffset;
voffsetstep1 = voffsetstep2 = voffsetstep;
// Mirror the per-channel offsets/steps to match any requested flips.
if(channel_flip & 0xf)
{
if(channel_flip & 2)
{
voffset1 = -voffset1;
voffsetstep1 = -voffsetstep1;
}
if(channel_flip & 8)
{
voffset2 = -voffset2;
voffsetstep2 = -voffsetstep2;
}
if(channel_flip & 1)
{
voffset1 += voffsetstep1*(xbytes/xstep);
voffsetstep1 = -voffsetstep1;
}
if(channel_flip & 4)
{
voffset2 += voffsetstep2*(xbytes/xstep);
voffsetstep2 = -voffsetstep2;
}
}
// Shift each column strip vertically; the offset ramps across the
// image to realize rotation via shear.
for(x=0; x<xbytes; x+=xstep)
{
if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
{
RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
xstep, height, local_pitch, -voffset2);
}
else
{
RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
xstep, height, local_pitch, voffset1);
}
if(decoder->channel_decodes == 2)
{
uint8_t *bptr = base + channel_offset;
RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch,
xstep, height, local_pitch, -voffset2);
}
base += xstep;
voffset1 += voffsetstep1;
voffset2 += voffsetstep2;
}
}
if(decoder->channel_mix_half_res == 1)
height *= 2;
if(ymin)
{
memset(local_output, 0, abs(local_pitch)); // zero one line;
}
// Rows above/below the frame mask reuse the zeroed line (pitch 0).
for(y=0; y<ymin; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
}
for(; y<ymax; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
}
for(; y<height; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
}
}
#endif
}
// Decode a sample from the input bitstream into the output frame buffer
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams, CFHDDATA *cfhddata)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};
int channel_decodes = 1; // 3D Work
int channel_offset = 0;
int channel_mask = 0;
int channel_current = 0;
//int wavelet_index;
bool result = true;
uint8_t *local_output = output;
uint8_t *local_buffer = NULL;
int local_pitch = pitch;
int internal_format = decoder->frame.format;
int output_format = decoder->frame.output_format;
bool use_local_buffer = false;
DECODER *local_decoder = decoder;
//Frame_Region emptyFrameMask = {0};
Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER;
int orig_width = decoder->frame.width;
int orig_height = decoder->frame.height;
decoder->local_output = local_output; // used for NV12 decodes.
decoder->sample_uncompressed = 0; // set if a uncompressed sample is found.
decoder->image_dev_only = 0;
if(decoder->flags & (1<<3)) // This is an image development only decode.
{
decoder->sample_uncompressed = 1;
decoder->image_dev_only = 1;
decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444;
decoder->codec.unique_framenumber = 0; //What should this be?
decoder->frame.white_point = 16; // how to we pass this in?
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer;
switch(output_format & 0x7fffffff)
{
case COLOR_FORMAT_RGB24:
decoder->uncompressed_size = orig_width * orig_height * 3;
break;
case COLOR_FORMAT_RGB32:
decoder->uncompressed_size = orig_width * orig_height * 4;
break;
case COLOR_FORMAT_RG48:
case COLOR_FORMAT_WP13:
decoder->uncompressed_size = orig_width * orig_height * 6;
break;
default:
decoder->uncompressed_size = orig_width * orig_height * 6;
assert(0);
break;
}
}
decoder->frame.alpha_Companded = 0; // reset this state.
if(decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = 0;
decoder->error = CODEC_ERROR_OKAY;
input->error = BITSTREAM_ERROR_OKAY;
// The first time through, encoded_format has not been initialized yet.
if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed
{
SAMPLE_HEADER header;
BITSTREAM input2;
InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ);
memset(&header, 0, sizeof(SAMPLE_HEADER));
header.find_lowpass_bands = 2; // help finding the uncompressed flag
if(ParseSampleHeader(&input2, &header))
{
decoder->codec.encoded_format = header.encoded_format;
decoder->sample_uncompressed = header.hdr_uncompressed;
if(decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed;
}
}
if((uintptr_t)input->lpCurrentBuffer & 0x3)
{
if(decoder->aligned_sample_buffer == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16);
#else
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
else
{
if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size)
{
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
}
else
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16);
#else
MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
}
input->lpCurrentBuffer = decoder->aligned_sample_buffer;
input->lpCurrentWord = decoder->aligned_sample_buffer;
}
#if 0 // Test for missaligning the image data
if(((int)input->lpCurrentBuffer&3) == 0)
{
int i;
uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer;
int missaligned = 1; //2 or 3
for(i=input->dwBlockLength-1; i>=0; i--)
ptr[i+missaligned] = ptr[missaligned];
input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned];
input->lpCurrentWord = (uint8_t *)&ptr[missaligned];
}
#endif
//HACK
// Unfortunately I need color matrix data deep within the codec for RT playback.
if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input
{
if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER)
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
if (size > sizeof(CFHDDATA)) {
// Limit the size to the known structure
size = sizeof(CFHDDATA);
}
memcpy(&decoder->cfhddata, cfhddata, size);
}
}
else
{
unsigned short value;
if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA))
{
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER;
decoder->cfhddata.size = sizeof(CFHDDATA);
if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value))
{
if(value == COLOR_FORMAT_RG48)
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if(value == COLOR_FORMAT_RG64)
{
decoder->cfhddata.cfhd_subtype = 3; //RGBA
decoder->cfhddata.num_channels = 4;
}
else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END)
{
unsigned int format = BAYER_FORMAT_RED_GRN;
decoder->cfhddata.cfhd_subtype = 1; //BAYER
decoder->cfhddata.bayer_format = format; // default to Red-Grn
decoder->cfhddata.version = CFHDDATA_VERSION;
}
}
}
}
OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed);
if(decoder->image_dev_only) // HACK we need to support 3D also.
decoder->source_channels = 1;
else
decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0);
if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override))
{
//int channels = 0;
int channel_blend_type = BLEND_NONE;
int channel_swapped_flags = 0;
if(decoder->cfhddata.MSCTV_Override)
{
channel_mask = decoder->cfhddata.MSCTV_Override&0xff;
channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff);
}
else
{
channel_mask = decoder->cfhddata.MSChannel_type_value&0xff;
channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff);
}
if(channel_mask != 3)
{
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
//if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302
{
if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only
{
if(decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 2); // 3D work
}
}
else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only
{
if(decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 1); // 3D work
}
else
{
//assume second channel decode
SkipVideoChannel(decoder, input, 2); // 3D work
}
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel
{
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if((channel_mask&3) == 3) // A+B 3d work
{
channel_decodes = 2;
decoder->channel_mix_half_res = 0;
if(channel_blend_type != BLEND_NONE)
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
//if(decoder->frame.format == DECODED_FORMAT_W13A)
// {
// decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
// }
//else
//{
// decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
// }
decoder->frame.format = internal_format = DECODED_FORMAT_RGB32;
local_pitch = decoder->frame.width * 4;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RGB24;
local_pitch = decoder->frame.width * 3; //RGB24
}
/* if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(output_format == DECODED_FORMAT_YUYV ||
output_format == DECODED_FORMAT_UYVY))
{
if( channel_blend_type == BLEND_FREEVIEW ||
((channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch = (decoder->frame.width) * 3; //RGB24
}
} */
}
/* if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA
{
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}*/
/* DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
} */
if( decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW))
{
if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
{
if(decoder->sample_uncompressed)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
}
else
{
if(decoder->preformatted_3D_type > BLEND_NONE)
{
// leave as is.
}
else if(FORMAT8BIT(output_format))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL;
decoder->frame.width /= 2;
local_pitch /= 2;
}
}
}
else
{
if(FORMAT8BIT(output_format))
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
//TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
}
if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2
{
channel_offset = local_pitch * (decoder->frame.height);
}
else if(channel_blend_type >= BLEND_ANAGLYPH_RC)
{
/* if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph
{
//B&W using YUYV
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
}*/
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}
else if(channel_blend_type == BLEND_NONE) // double high
{
channel_offset = pitch * decoder->frame.height;
}
else
{
channel_blend_type = BLEND_STACKED_ANAMORPHIC;
channel_offset = pitch * (decoder->frame.height/2);
}
// fields, stacked, etc, only works on full or half res.
if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED &&
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail.
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
if (channel_blend_type != BLEND_NONE &&
(output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 ))
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
}
}
decoder->channel_decodes = channel_decodes;
decoder->channel_blend_type = channel_blend_type;
decoder->channel_swapped_flags = channel_swapped_flags;
}
else
{
decoder->channel_decodes = channel_decodes = 1;
decoder->channel_blend_type = BLEND_NONE;
decoder->channel_swapped_flags = 0;
}
if(cfhddata) // So the P-frames can know the bayerformat
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
if (size > sizeof(CFHDDATA)) {
size = sizeof(CFHDDATA);
}
memcpy(cfhddata, &decoder->cfhddata, size);
}
{
bool doOrientation = true;
bool doFraming = true;
bool doBurins = true;
bool doImageflips = true;
bool doGhostBust = false;
bool doPrimaries = true;
int process_path_flags = decoder->cfhddata.process_path_flags;
int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask;
if(decoder->basic_only)
{
doOrientation = false;
doFraming = false;
doBurins = false;
doImageflips = false;
doPrimaries = false;
}
else
{
if(decoder->cfhddata.process_path_flags_mask)
{
//DAN20101007 --
if(process_path_flags == 0)
decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask;
process_path_flags &= decoder->cfhddata.process_path_flags_mask;
if(process_path_flags_mask & PROCESSING_ACTIVE2)
{
if(!(process_path_flags_mask & PROCESSING_ORIENTATION))
doOrientation = false;
if(!(process_path_flags_mask & PROCESSING_FRAMING))
doFraming = false;
if(!(process_path_flags_mask & PROCESSING_BURNINS))
doBurins = false;
if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS))
doImageflips = false;
}
if(!(process_path_flags_mask & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
if(process_path_flags & PROCESSING_ACTIVE2)
{
if(!(process_path_flags & PROCESSING_ORIENTATION))
doOrientation = false;
if(!(process_path_flags & PROCESSING_FRAMING))
doFraming = false;
if(!(process_path_flags & PROCESSING_BURNINS))
doBurins = false;
if(!(process_path_flags & PROCESSING_IMAGEFLIPS))
doImageflips = false;
if(!(process_path_flags & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
}
if(doOrientation)
process_path_flags |= PROCESSING_ORIENTATION;
if(doFraming)
process_path_flags |= PROCESSING_FRAMING;
if(doBurins)
process_path_flags |= PROCESSING_BURNINS;
if(doImageflips)
process_path_flags |= PROCESSING_IMAGEFLIPS;
if(doPrimaries)
process_path_flags |= PROCESSING_COLORMATRIX;
if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if(decoder->ghost_bust_left || decoder->ghost_bust_right)
{
doGhostBust = true;
}
}
decoder->cfhddata.process_path_flags = process_path_flags;
if((!decoder->basic_only &&
(doOrientation && ( decoder->cfhddata.channel[0].FloatingWindowMaskL ||
decoder->cfhddata.channel[0].FloatingWindowMaskR ||
decoder->cfhddata.channel[0].FrameKeyStone ||
decoder->cfhddata.channel[0].FrameTilt ||
decoder->cfhddata.channel[0].HorizontalOffset ||
decoder->cfhddata.channel[0].VerticalOffset ||
decoder->cfhddata.channel[0].RotationOffset ||
decoder->cfhddata.channel[1].FloatingWindowMaskL ||
decoder->cfhddata.channel[1].FloatingWindowMaskR ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[1].HorizontalOffset ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FloatingWindowMaskL ||
decoder->cfhddata.channel[2].FloatingWindowMaskR ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].FrameTilt ||
decoder->cfhddata.channel[2].HorizontalOffset ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0)))
||
(doPrimaries && ( decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[2].user_blur_sharpen != 0.0))
||
(doFraming && ( decoder->cfhddata.channel[0].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[1].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[2].user_vignette_start != 0.0))
||
(doFraming && ( memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0))
||
(doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2))
||
(doImageflips && decoder->cfhddata.channel_flip)
||
(decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) ||
(decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) ||
(decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes.
( ((decoder->frame.width+7)/8)*8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) ||
decoder->sample_uncompressed) ||
(decoder->cfhddata.doMesh)
)
{
if( output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 )
{
// no manipulation should be applied
}
else
{
use_local_buffer = true;
local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48
if(decoder->image_dev_only)
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
local_pitch = ((decoder->frame.width+7)/8)*8 * 8;
}
else
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
local_pitch *= 2; // need horizontal room to make 3D side by side frame
}
/*
if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A)
{
// preserve HDR
decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output
if(output_format == DECODED_FORMAT_W13A)
local_pitch = decoder->frame.width * 8;
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
local_pitch = decoder->frame.width * 8;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG48;
}
}*/
channel_offset = local_pitch * (decoder->frame.height);
}
}
}
if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
{
if(decoder->BYR4LinearRestore == NULL)
{
int j,val;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
//int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
float encode_curvebase;
if(encode_curve_type) //1 or 2
{
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
else
{
encode_curve_type = CURVE_TYPE_LOG;
encode_curvebase = 90.0;
}
#if _ALLOCATOR
decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16);
#else
decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16);
#endif
for(j=0; j<16384; j++) //0 to 1
{
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = (int)(CURVE_LOG2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_GAMMA:
val = (int)(CURVE_GAM2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINEON:
val = (int)(CURVE_CINEON2LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINE985:
val = (int)(CURVE_CINE9852LIN((float)j/16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_PARA:
val = (int)(CURVE_PARA2LIN((float)j/16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_CSTYLE:
val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_SLOG:
val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LOGC:
val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LINEAR:
default:
val = j;
break;
}
if(val < 0) val = 0;
if(val > 65535) val = 65535;
decoder->BYR4LinearRestore[j] = val;
}
}
}
//DAN20120319 - removed
/*if(decoder->channel_mix_half_res) //decoding half but scaling to double the output size
{
local_pitch *= 2;
channel_offset = local_pitch * (decoder->frame.height*2);
}*/
if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats
{
int stereoframesize = channel_offset * channel_decodes/*stacked frames*/;
if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE)
stereoframesize = channel_offset;
if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize)
{
#if _ALLOCATOR
if(decoder->StereoBuffer)
{
FreeAligned(decoder->allocator, decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#else
if(decoder->StereoBuffer)
{
MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#endif
assert(decoder->StereoBuffer != NULL);
if (! (decoder->StereoBuffer != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->StereoBufferSize = stereoframesize;
}
decoder->StereoBufferFormat = internal_format;
local_buffer = (uint8_t *)decoder->StereoBuffer;
local_output = local_buffer;
}
DecodeEntropyInit(decoder);
//swapped -- Maybe useful for double height decodes.
/* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED)
{
local_output += channel_offset;
channel_offset = -channel_offset;
}*/
decoder->use_local_buffer = use_local_buffer ? 1 : 0;
if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1)
{
int encoded_width = decoder->frame.width;
int encoded_height = decoder->frame.height;
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
encoded_height *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 4;
encoded_height *= 4;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_height *= 2;
}
#if _ALLOCATOR
decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER));
if(decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#else
decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER));
if(decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl;
DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#endif
}
// Using the parallel decoder?
if (decoder->parallelDecoder)
{
// Initialize the parallel decoder with parameters from the regular decoder
memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA));
memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16);
DecodeEntropyInit(decoder->parallelDecoder);
DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize);
decoder->parallelDecoder->channel_decodes = decoder->channel_decodes;
decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type;
decoder->parallelDecoder->flags = decoder->flags;
decoder->parallelDecoder->frame = decoder->frame;
decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0;
decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format;
if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0)
{
CreateLock(&decoder->parallelDecoder->decoder_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool,
1, //
ParallelThreadProc,
decoder->parallelDecoder);
}
}
if(channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count)
{
// Second stream as a thread.
BITSTREAM second_input = *input;
if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap
{
BITSTREAM leftEye_input = *input;
SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work
*input = leftEye_input;
SkipVideoChannel(decoder, &second_input, 1); // 3D work
}
else
SkipVideoChannel(decoder, &second_input, 2); // 3D work
decoder->channel_current = 0;
decoder->parallelDecoder->channel_current = 1;
// Instead of reading the metadata databases again, use the ones in the main decoder
OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed);
// DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.)
// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed);
//OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed);
// Hack, this gets lost
decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position;
#if (_THREADED && _GRAPHICS)
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
{
DrawStartThreaded(decoder);
}
}
#endif
// Post a message to the mailbox
decoder->parallelDecoder->decoder_thread.input = &second_input;
if(use_local_buffer == false &&
(decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24))
{
decoder->parallelDecoder->decoder_thread.output = local_output;
local_output += channel_offset;
}
else
{
decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset;
}
decoder->parallelDecoder->decoder_thread.pitch = local_pitch;
decoder->parallelDecoder->decoder_thread.colorparams = colorparams;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START);
// do the first channel
{
TAGVALUE segment;
int sample_type;
#if _THREADED
decoder->entropy_worker_new.next_queue_num = 0;
decoder->entropy_worker_new.threads_used = 0;
#endif
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool);
}
else
{
while(channel_decodes > 0)
{
TAGVALUE segment;
int sample_type;
local_decoder->channel_current = channel_current++;
//OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed);
#if (_THREADED && _GRAPHICS)
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
if(decoder->cfhddata.BurninFlags & 3) //overlays / tools
{
DrawStartThreaded(decoder);
}
}
#endif
#if _THREADED
local_decoder->entropy_worker_new.next_queue_num = 0;
local_decoder->entropy_worker_new.threads_used = 0;
#endif
if(decoder->image_dev_only)
{
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
}
else
{
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) {
local_decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset))
{
channel_decodes = 0;
}
else
{
channel_decodes--;
local_output += channel_offset;
if(decoder->parallelDecoder)
{
local_decoder = decoder->parallelDecoder;
}
}
}
}
if(use_local_buffer && output)
{
decoder->use_local_buffer = 0;
#if WARPSTUFF
WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
#endif
ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset));
}
else
{
#if WARPSTUFF
WarpFrame(decoder, output, pitch, output_format);
MaskFrame(decoder, output, pitch, output_format);
#endif
}
if(decoder->channel_mix_half_res) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
decoder->frame.height *= 2;
decoder->channel_mix_half_res = 0;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
}
if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
}
#if _GRAPHICS
if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
{
PaintFrame(decoder, output, pitch, output_format);
}
#endif
STOP(tk_decompress);
// Return indication of whether decoding succeeded or failed
return result;
}
/*
 * DecodeSampleGroup
 *
 * Decode a sample that encodes a group of frames (a 2-frame GOP) and write the
 * first frame of the group into the caller's output buffer.
 *
 * Parameters:
 *   decoder      - decoder instance; receives error codes in decoder->error and
 *                  the decoded GOP length / frame count on success
 *   input        - bitstream positioned at the start of the group sample
 *   output       - destination frame buffer (zeroed on failure when frame_size > 0)
 *   pitch        - output row pitch in bytes
 *   colorparams  - color conversion parameters (not referenced directly here;
 *                  presumably consumed by downstream reconstruction — verify)
 *
 * Returns true on success, false on a bitstream or codec-state error.
 */
bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
// Size in bytes of one output frame; used to clear the frame on failure
int32_t frame_size = decoder->frame.height * pitch;
int resolution = decoder->frame.resolution;
bool result = true;
// Mapping from subband number to the wavelet that holds it, and to the band
// index within that wavelet (17 subbands for the group/field-plus transform)
static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0};
static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3};
int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);
#if (0 && DEBUG)
// Force quarter resolution decoding for debug that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoding sample group\n");
}
#endif
START(tk_decoding);
// Initialize the codec state
InitCodecState(&decoder->codec);
// Allocate the transform data structure for the group of frames
AllocDecoderGroup(decoder);
// Initialize the tables for decoding the wavelet transforms
InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);
// Clear the flags in the wavelet transforms
ClearTransformFlags(decoder);
// Main parsing loop: process the tag value pairs until the sample is done.
// UpdateCodecState performs the actual subband decoding as a side effect.
for (;;)
{
TAGVALUE segment;
// Read the next tag value pair from the bitstream
//segment = GetTagValue(input);
segment = GetSegment(input);
assert(input->error == BITSTREAM_ERROR_OKAY);
if (input->error != BITSTREAM_ERROR_OKAY) {
decoder->error = CODEC_ERROR_BITSTREAM;
result = false;
break;
}
// Update the codec state with the information in the tag value pair
{
TAGWORD tag = segment.tuple.tag;
TAGWORD value = segment.tuple.value;
// Use the tag value pair to update the codec state
error = UpdateCodecState(decoder, input, codec, tag, value);
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY)
{
decoder->error = error;
result = false;
break;
//NOTE: Consider moving the error code into the codec state
}
}
// Check whether the group has been decoded
if (codec->sample_done) break;
// Skip the rest of the current channel? (e.g. chroma at reduced resolutions,
// or an alpha channel the output format does not need)
if (CanSkipChannel(decoder, resolution))
{
// Channel 3 (alpha) is not needed for YUV 4:2:2 output formats:
// skip it and finish decoding with three channels
if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
{
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
codec->num_channels = 3;
goto decoding_complete;
}
else
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
// Lowpass-only decode: only the channel lowpass band is needed,
// so jump straight past the rest of this channel
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
else
{
// Compute the bitstream position after the current channel
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Get the temporal wavelet
int temporal_index = 2;
TRANSFORM *transform = decoder->transform[channel];
IMAGE *wavelet = transform->wavelet[temporal_index];
#if (0 && DEBUG)
if (IsBandValid(wavelet, HIGHPASS_BAND))
{
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
sprintf(label, "Temporal-decode-%d-", count);
DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL);
}
count++;
}
#endif
#if _THREADED_DECODER
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
//if (DecodedBandsValid(wavelet, temporal_index))
if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
// Have all bands in the temporal wavelet been decoded?
//if (wavelet && BANDS_ALL_VALID(wavelet))
if (AllBandsValid(wavelet))
#endif
{
//PIXEL *buffer = (PIXEL *)decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
}
#endif
#if _THREADED_DECODER
// Add the temporal inverse transform to the processing queue
// when the worker thread pool is available
if(decoder->entropy_worker_new.pool.thread_count)
{
ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
precision, &decoder->scratch, 1);
QueueThreadedTransform(decoder, channel, temporal_index);
}
else
#endif
{
// Reconstruct the lowpass bands in the first level wavelets
//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
precision, &decoder->scratch, 0 );
}
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
// Note that the subband flags are also reset when the channel header is decoded
}
// Was the wavelet created?
else if (wavelet == NULL)
{
// The temporal wavelet is not created during quarter resolution decoding
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
//TODO: Improve quarter resolution decoding so that the wavelet is created?
}
}
}
decoding_complete:
STOP(tk_decoding);
#if (0 && DEBUG)
if (logfile)
{
char label[_MAX_PATH];
int channel;
for (channel = 0; channel < codec->num_channels; channel++)
{
TRANSFORM *transform = decoder->transform[channel];
IMAGE *wavelet = transform->wavelet[2];
uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND];
int height = wavelet->height;
int pitch = wavelet->pitch;
int size = height * pitch;
int band;
for (band = 0; band < wavelet->num_bands; band++)
{
sprintf(label, "Temporal channel: %d, band: %d", channel, band);
DumpBandStatistics(label, wavelet, band, logfile);
#if 0
sprintf(label, "Temporal-channel%d-band%d-", channel, band);
DumpBandPGM(label, wavelet, band, NULL);
#endif
}
assert(size > 0);
ZeroMemory(data, size);
}
}
#endif
if (result)
{
// Two frames have been decoded
decoder->gop_length = 2;
decoder->frame_count += 2;
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile,
"DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n",
decoder, decoder->gop_length);
}
#endif
// Return the first frame in the group
if (!decoder->no_output)
{
#if 0
// Decoding to quarter frame resolution at full frame rate?
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int num_channels = codec->num_channels;
FRAME_INFO *info = &decoder->frame;
char *buffer = decoder->buffer;
size_t buffer_size = decoder->buffer_size;
uint8_t *frame1 = output;
uint8_t *frame2 = decoder->output2;
assert(frame2 != NULL);
// Reconstruct two frames at quarter resolution
ReconstructQuarterFrame(decoder, num_channels,
frame1, frame2, pitch,
info, buffer, buffer_size);
}
else
#endif
// Finish computing the output frame (frame index 0 = first frame in the GOP)
ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
}
if (decoder->error != CODEC_ERROR_OKAY) {
result = false;
}
#if TIMING
// Increment the count of bytes that have been decoded
decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
}
if (!result)
{
// On failure, hand the caller a black frame rather than stale memory
// Check that the frame can be cleared
assert(frame_size > 0);
if (frame_size > 0)
{
// Zero the frame
memset(output, 0, frame_size);
}
}
return result;
}
/*
 * DecodeSampleFrame
 *
 * Decode a sample that represents the second frame in a group and write the
 * appropriate frame of the already-decoded GOP into the output buffer.
 *
 * Returns true on success; on failure the output frame is zeroed (when the
 * frame size is positive) and decoder->error holds the reason.
 */
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t frame_size = decoder->frame.height * pitch;
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    bool result = true;
    bool header_done = false;

    START(tk_decoding);

    // Consume tag value pairs until the frame index tag ends the frame header
    while (!header_done)
    {
        TAGWORD tag;
        TAGWORD value;
        TAGVALUE segment = GetSegment(input);
        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Fold this tag value pair into the codec state
        error = UpdateCodecState(decoder, input, codec, tag, value);
        assert(error == CODEC_ERROR_OKAY);
        if (error != CODEC_ERROR_OKAY)
        {
            decoder->error = error;
            result = false;
            break;
        }

        header_done = (tag == CODEC_TAG_FRAME_INDEX);
    }
    STOP(tk_decoding);

#if (1 && DEBUG)
    if (logfile) {
        fprintf(logfile,
                "DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
                decoder, decoder->gop_length);
    }
#endif

    if (result)
    {
        // Pick the frame to display: the second frame when the GOP holds two
        // frames, otherwise fall back to the first (and only) decoded frame
        if (decoder->gop_length > 0)
        {
            int frame_index = (decoder->gop_length >= 2) ? 1 : 0;
            ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
            if (decoder->error != CODEC_ERROR_OKAY) {
                result = false;
            }
        }
#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Hand back a black frame instead of stale buffer contents
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            memset(output, 0, frame_size);
        }
    }

    return result;
}
/*
 * DecodeSampleIntraFrame
 *
 * Decode a sample that encodes a single intra frame (no temporal transform)
 * and write the reconstructed frame into the caller's output buffer.
 *
 * Parameters:
 *   decoder      - decoder instance; receives error codes in decoder->error
 *   input        - bitstream positioned at the start of the intra frame sample
 *   output       - destination frame buffer (zeroed on failure when frame_size > 0)
 *   pitch        - output row pitch in bytes
 *   colorparams  - color conversion parameters (not referenced directly here;
 *                  presumably consumed by downstream reconstruction — verify)
 *
 * Returns true on success, false on a bitstream or codec-state error.
 */
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
// Size in bytes of one output frame; used to clear the frame on failure
int32_t frame_size = decoder->frame.height * pitch;
int resolution = decoder->frame.resolution;
bool result = true;
// Mapping from subband number to the wavelet that holds it (10 subbands for
// the spatial-only intra transform) and to the band index within the wavelet.
// NOTE(review): subband_band_index has 16 entries but only the first
// num_subbands (10) are used — the tail looks like leftover data; confirm.
static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);
START(tk_decoding);
// Image-development-only decodes skip the bitstream parsing entirely
if(decoder->image_dev_only) goto decoding_completeI;
// Initialize the codec state
InitCodecState(&decoder->codec);
// Allocate the transform data structure for the group of frames
AllocDecoderGroup(decoder);
// Initialize the tables for decoding the wavelet transforms
InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);
// Clear the flags in the wavelet transforms
ClearTransformFlags(decoder);
//Force V210 output for debugging ***DEBUG***
//decoder->frame.format = DECODED_FORMAT_V210;
// Main parsing loop: process the tag value pairs until the sample is done.
// UpdateCodecState performs the actual subband decoding as a side effect.
for (;;)
{
TAGVALUE segment;
// Read the next tag value pair from the bitstream
segment = GetSegment(input);
assert(input->error == BITSTREAM_ERROR_OKAY);
if (input->error != BITSTREAM_ERROR_OKAY) {
decoder->error = CODEC_ERROR_BITSTREAM;
result = false;
break;
}
{
TAGWORD tag = segment.tuple.tag;
TAGWORD value = segment.tuple.value;
// Use the tag value pair to update the codec state
error = UpdateCodecState(decoder, input, codec, tag, value);
assert(error == CODEC_ERROR_OKAY);
if (error != CODEC_ERROR_OKAY) {
decoder->error = error;
result = false;
break;
//NOTE: Consider moving the error code into the codec state
}
}
// Check whether the group has been decoded
if (codec->sample_done) {
break;
}
// Skip the rest of the current channel? (e.g. chroma at reduced resolutions,
// or an alpha channel the output format does not need)
if (CanSkipChannel(decoder, resolution))
{
// Channel 3 (alpha) is not needed for YUV 4:2:2 output formats:
// skip it and finish decoding with three channels
if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
{
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
codec->num_channels = 3;
goto decoding_completeI;
}
else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
// Lowpass-only decode: only the channel lowpass band is needed,
// so jump straight past the rest of this channel
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
else
{
// Compute the bitstream position after the current channel
int channel = codec->channel;
uint32_t channel_size = codec->channel_size[channel];
uint8_t *position = codec->channel_position + channel_size;
// Get the highest wavelet in the pyramid
int wavelet_index = 2;
TRANSFORM *transform = decoder->transform[channel];
IMAGE *wavelet = transform->wavelet[wavelet_index];
#if _THREADED_DECODER
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
//if (DecodedBandsValid(wavelet, temporal_index))
if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
// Have all bands in the wavelet been decoded?
if (AllBandsValid(wavelet))
#endif
{
//PIXEL *buffer = (PIXEL *)decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int band;
sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
DumpImageStatistics(label, wavelet, logfile);
#if 1
for (band = 1; band < wavelet->num_bands; band++)
{
sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
DumpBandStatistics(label, wavelet, band, logfile);
}
#endif
}
#endif
#if (0 & DEBUG)
if (logfile) {
fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
}
#endif
#if _THREADED_DECODER
// Add the inverse spatial transform to the processing queue
// when the worker thread pool is available
if(decoder->entropy_worker_new.pool.thread_count)
{
ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
precision, &decoder->scratch, 1);
QueueThreadedTransform(decoder, channel, wavelet_index);
}
else
#endif
{
// Reconstruct the lowpass bands in the first level wavelets
//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
precision, &decoder->scratch, 0);
}
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
// Note that the subband flags are also reset when the channel header is decoded
}
// Was the wavelet created?
//else if (wavelet == NULL)
else
{
// The wavelet may not have been created during quarter resolution decoding
// The wavelet should have been created if all bands are valid
assert(wavelet != NULL);
// Advance the bitstream to the next channel
SetBitstreamPosition(input, position);
// Reset the decoded subband flags (otherwise this code will be executed again)
codec->decoded_subband_flags = 0;
}
//TODO: Improve quarter resolution decoding so that the wavelet is created?
}
}
}
decoding_completeI:
STOP(tk_decoding);
if (result)
{
// One frame has been decoded
decoder->gop_length = 1;
decoder->frame_count += 1;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile,
"DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n",
decoder, decoder->gop_length);
}
#endif
// Return the first frame (the only frame that was decoded)
if (!decoder->no_output)
{
// Uncompressed samples bypass the quarter-resolution fast path below
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
{
//CODEC_STATE *codec = &decoder->codec;
TRANSFORM **transform_array = decoder->transform;
int num_channels = codec->num_channels;
//int progressive = codec->progressive;
FRAME_INFO *info = &decoder->frame;
int precision = codec->precision;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision);
}
else
{
// Finish computing the output frame
ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
}
}
if (decoder->error != CODEC_ERROR_OKAY) {
result = false;
}
#if TIMING
// Increment the count of bytes that have been decoded
decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
}
if (!result)
{
// On failure, hand the caller a black frame rather than stale memory
// Check that the frame can be cleared
assert(frame_size > 0);
if (frame_size > 0)
{
// Zero the frame
memset(output, 0, frame_size);
}
}
return result;
}
/*
 * DecodeSampleChannelHeader
 *
 * Decode a channel header from the bitstream and advance the codec state to
 * the next channel: the new channel's transform is initialized from the
 * previous channel's transform and the per-channel subband state is reset.
 *
 * Returns true on success; on failure decoder->error holds the codec error.
 */
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    CHANNEL_HEADER header;
    CODEC_ERROR error;
    int prev_channel = codec->channel;
    int next_channel = prev_channel + 1;
    TRANSFORM *prev_transform = decoder->transform[prev_channel];

    // Parse the remainder of the channel header from the bitstream
    error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
    assert(error == CODEC_ERROR_OKAY);
    decoder->error = error;
    if (error != CODEC_ERROR_OKAY) {
        return false;
    }

    // The decoder is not able to skip channels, so the header just read
    // must belong to the channel immediately following the current one
    assert(header.channel == next_channel);

    // Seed the next channel's transform from the previous channel's transform
    InitChannelTransform(decoder->transform[next_channel], prev_transform);

    // Advance to the new channel and reset the per-channel decoding state
    codec->channel = next_channel;
    codec->band.subband = 0;
    codec->decoded_subband_flags = 0;

    return true;
}
// Decode the coefficients in a subband
// Decode one subband of coefficients from the bitstream into the wavelet that
// owns it, then (when all bands of that wavelet are present) trigger the
// inverse transform that reconstructs the next lower wavelet in the pyramid.
//
// Dispatches on the subband number:
//   subband == 255 : empty band (temporal highpass placeholder)
//   subband > 0    : highpass band (wavelet chosen via subband_wavelet_index)
//   subband == 0   : lowpass band of the first wavelet
//
// Returns true if the subband was decoded successfully.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int *subband_wavelet_index = decoder->subband_wavelet_index;
	// Used for quarter resolution and threaded decoding
	int transform_type = transform->type;
	// Wavelet parameters
	int width;
	int height;
	int level;
	int type;
	int band;
	int threading = 1;
	// Wavelet containing the band to decode
	int index;
	IMAGE *wavelet = NULL;
	bool result;
	// Subbands 7-10 of a fieldplus transform are decoded without threading
	// (NOTE(review): presumably because they feed the temporal reconstruction — confirm)
	if(subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
		threading = 0;
	// Update the transform data structure from the codec state
	UpdateCodecTransform(transform, codec);
	// Is this an empty band?
	if (subband == 255)
	{
		// Decode an empty band
		// This wavelet is the temporal wavelet
		index = 2;
		wavelet = transform->wavelet[index];
		// Get the wavelet parameters decoded from the bitstream
		width = codec->band.width;
		height = codec->band.height;
		level = codec->highpass.wavelet_level;
		type = codec->highpass.wavelet_type;
		band = codec->band.number;
		// The empty band should be the highpass band in a temporal wavelet
		assert(type == WAVELET_TYPE_TEMPORAL && band == 1);
#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif
		// Set the wavelet parameters
		wavelet->pixel_type[band] = PIXEL_TYPE_16S;
		wavelet->num_bands = 2;
		result = DecodeSampleEmptyBand(decoder, input, wavelet, band);
		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = 11;
	}
	// Is this a highpass band?
	else if (subband > 0)
	{
		// Decode a highpass band
		// Get the wavelet that contains this subband
		index = subband_wavelet_index[subband];
		wavelet = transform->wavelet[index];
		// Get the wavelet parameters decoded from the bitstream
		width = codec->band.width;
		height = codec->band.height;
		level = codec->highpass.wavelet_level;
		type = codec->highpass.wavelet_type;
		band = codec->band.number;
#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif
		result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandStartedFlags(decoder, wavelet, band);
		}
		// Reset the default encoding method
		codec->band.encoding = BAND_ENCODING_RUNLENGTHS;
		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = subband + 1;
	}
	else
	{
		// Decode a lowpass band
		// Get the wavelet that contains this subband
		index = subband_wavelet_index[0];
		wavelet = transform->wavelet[index];
		// Get the wavelet parameters decoded from the bitstream
		width = codec->lowpass.width;
		height = codec->lowpass.height;
		level = codec->lowpass.level;
		type = codec->first_wavelet;
		//band = codec->band.number;
		band = 0;
#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif
		// The lowpass data is always stored in wavelet band zero
		assert(band == 0);
		// The lowpass band must be subband zero
		assert(subband == 0);
		result = DecodeSampleLowPassBand(decoder, input, wavelet);
		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = subband + 1;
	}
	// Was the subband successfully decoded?
	if (result)
	{
		// The transform will set the band valid flag if this is the temporal wavelet
		//if (index != 2)
		// Record that this subband has been decoded successfully
		if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
			codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n",
				subband, index, channel);
		}
#endif
	}
#if _THREADED_DECODER
	// Ready to queue a threaded transform to invert this wavelet?
	if (BANDS_ALL_STARTED(wavelet))
	{
		// Are frames being decoded to quarter resolution?
		if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
		{
			// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
			int highest_index = 5;
			if (transform_type == TRANSFORM_TYPE_SPATIAL)
			{
				// Smallest wavelet in the spatial transform
				highest_index = 2;
			}
			// Only the smallest spatial wavelet must be reconstructed
			if (index != highest_index) {
				return result;
			}
			//TODO: Can we improve on the current scheme for quarter resolution decoding?
		}
		if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
		{
			if(decoder->entropy_worker_new.pool.thread_count && threading)
			{
				// Perform only the allocations here (last argument is 1), then
				// hand the actual inverse transform to the worker queue
				ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
					codec->precision, &decoder->scratch, 1);
				// Add the inverse wavelet transform to the processing queue
				QueueThreadedTransform(decoder, codec->channel, index);
			}
			else
			{
				// Apply the inverse wavelet transform to reconstruct the lower level wavelet
				ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
					codec->precision, &decoder->scratch, 0);
			}
		}
	}
#else
	// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
	if (BANDS_ALL_VALID(wavelet))
	{
		int channel = codec->channel;
		//PIXEL *buffer = (PIXEL *)decoder->buffer;
		//size_t buffer_size = decoder->buffer_size;
		int precision = codec->precision;
#if (0 && DEBUG)
		if (logfile) {
			char label[_MAX_PATH];
			int band;
			sprintf(label, "Channel: %d, index: %d", channel, index);
			DumpImageStatistics(label, wavelet, logfile);
#if 1
			for (band = 1; band < wavelet->num_bands; band++)
			{
				sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
				DumpBandStatistics(label, wavelet, band, logfile);
			}
#endif
		}
#endif
		// Are frames being decoded to quarter resolution?
		if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
		{
			// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
			int highest_index = 5;
			if (transform_type == TRANSFORM_TYPE_SPATIAL)
			{
				// Smallest wavelet in the spatial transform
				highest_index = 2;
			}
			// Only the smallest spatial wavelet must be reconstructed
			if (index != highest_index) {
				return result;
			}
			//TODO: Can we improve on the current scheme for quarter resolution decoding?
		}
		// Apply the inverse wavelet transform to reconstruct the lower level wavelet
		ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0);
	}
#endif
	return result;
}
// Decode the coefficients in a lowpass band
// Decode the coefficients of the lowpass band into band zero of the wavelet.
//
// Three decode paths are selected by the encoded bits-per-pixel and the
// alignment of the bitstream:
//   1. 16-bit fast path (word-aligned stream, even width): reads two pixels
//      per 32-bit big-endian word, with an optional solid-color shortcut.
//   2. 8-bit fast path (word-aligned stream): one byte per pixel with
//      inverse quantization and a gain adjustment.
//   3. Generic path: GetBits() per pixel with inverse quantization.
// Each path applies a precision/format dependent "channeloffset" correction.
// Always returns true; the result flag exists for symmetry with the other
// band decoders.
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	bool result = true;
	int lowpass_width; // Lowpass band dimensions
	int lowpass_height;
	int lowpass_pitch;
	PIXEL *pLowPassRow; // Pointer into the lowpass band
	//int wavelet_width; // Dimensions of the wavelet image
	//int wavelet_height;
	int bits_per_pixel;
	int quantization;
	int offset;
	//int pixel_divisor = (1 << (2 * codec->lowpass.level));
	int row, column;
	// -1 means "no solid color"; otherwise holds the color replicated into both
	// 16-bit halves of a 32-bit word
	int32_t solid_color = -1;
	const int gain = 128;
	const int colorshift = 0;
	// int channelgain[4];
	//int waterrow=19, watercol=214;
	//int cspace = decoder->frame.colorspace;
	// Lowpass image dimensions may be smaller than the wavelet dimensions
	// because the encoder may have transmitted an image without the border
	lowpass_width = codec->lowpass.width;
	lowpass_height = codec->lowpass.height;
	lowpass_pitch = wavelet->pitch/sizeof(PIXEL);
	pLowPassRow = wavelet->band[0];
	// Get the parameters for quantization performed by the encoder
	quantization = codec->lowpass.quantization;
	offset = codec->lowpass.pixel_offset;
	bits_per_pixel = codec->lowpass.bits_per_pixel;
#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decode lowpass subband\n");
	}
#endif
	if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1))
	{
		// Fast path: read pairs of 16-bit pixels as 32-bit big-endian words
		int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
		//int signval = 0;
		//int channel3stats = 0;
		int channeloffset = 0;
		// Precision/format dependent offset that compensates for rounding
		// differences between encode paths (values are empirically tuned —
		// see the DAN-dated comments below)
		if(decoder->codec.precision == 8)
		{
			channeloffset = (codec->num_frames==2 ? 64 : 32);
		}
		else if(decoder->codec.precision == 10)
		{
			switch(decoder->frame.format)
			{
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
			case DECODED_FORMAT_V210:
				channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301
				break;
			default:
				channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601
			}
			if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
				channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
		}
		else if(decoder->codec.precision == 12)
		{
			switch(decoder->frame.format)
			{
			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:
				channeloffset = 8; //DAN200906010
				break;
			// 16-bit precision:
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_W13A:
				channeloffset = 0;
				break;
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
				channeloffset = 6; //DAN200906010 //DAN20100822 -- perfect for uncompressed to compressed.
				break;
			default:
				channeloffset = 0;
				break;
			}
		}
		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
			channeloffset = 0;
#define DUMPLL 0
#if (_DEBUG && DUMPLL)
		FILE *fp;
		if(channel == 0)
		{
			static int inc = 1;
			char name[256];
			sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++);
			fp = fopen(name,"w");
			fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
		}
#endif
#if LOSSLESS
		channeloffset = 0; //LOSSLESS
#endif
		// Check for the solid-color marker: an all-ones word followed by the
		// color and the band dimensions means the whole band is one color
		//if(lpCurrentLong[0] == 0xffffffff)
		if(lpCurrentLong[0] == (int32_t)UINT32_MAX)
		{
			if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
			{
				if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
				{
					solid_color = SwapInt32BtoN(lpCurrentLong[1]);
					solid_color |= (solid_color<<16);
					lpCurrentLong += 4;
				}
			}
		}
		// Decode each row in the lowpass image
		for (row = 0; row < lowpass_height; row++)
		{
			int pixels;
			// Start at the first column
			column = 0;
			// Process the rest of the row
			{
				for (; column < lowpass_width; column++)
				{
					int pixel_value;
					//int i;
					// Perform inverse quantization
					if(column & 1)
					{
						// Odd column: use the low half saved from the last word
						// (safe: column 0 is even, so pixels is always set first)
						pixel_value = pixels;
					}
					else
					{
						//pixels = _bswap(*(lpCurrentLong++));
						if(solid_color == -1)
							pixels = SwapInt32BtoN(*(lpCurrentLong++));
						else
							pixels = solid_color;
						// High 16 bits are this pixel; sign-extend the low 16
						// bits for the next (odd) column
						pixel_value = (pixels>>16);
						pixels <<= 16;
						pixels >>= 16;
					}
					// Store the pixel in the lowpass band of the wavelet
					pixel_value += channeloffset;
					// pixel_value -= 64;
					// pixel_value += ((rand() & 0x7fff) - 0x4000);
					// if(pixel_value < 0) pixel_value = 0;
					if(pixel_value > 0x7fff) pixel_value = 0x7fff;
					pLowPassRow[column] = pixel_value;
#if (_DEBUG && DUMPLL)
					if(channel==0 && fp)
						fprintf(fp, "%d\n", pixel_value>>7);
#endif
				}
			}
			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}
#if (_DEBUG && DUMPLL)
		if(channel == 0 && fp)
			fclose(fp);
#endif
#if ERROR_TOLERANT
		// Update the count of bytes used
		stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
		// Update the bitstream
		stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
	}
	else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
	{
		// Fast path: one byte per pixel, word-aligned stream
		uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
		//int signval = 0;
		// Decode each row in the lowpass image
		for (row = 0; row < lowpass_height; row++)
		{
			// Start at the first column
			column = 0;
			// Process the rest of the row
			for (; column < lowpass_width; column++)
			{
				int pixel_value = *(lpCurrentByte++);
				// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
				if (channel == 0)
					pixel_value = (quantization * pixel_value) + offset;
				else
					pixel_value = (pixel_value - offset) * quantization;
#else
				pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
				// Apply the gain about the mid-point (gain is 128, shift by 7,
				// so this is currently a no-op scale of 1.0)
				pixel_value -= 128 * quantization;
				pixel_value *= gain;
				pixel_value >>= 7;
				pixel_value += 128 * quantization;
				pixel_value += colorshift;
				// Store the pixel in the lowpass band of the wavelet
				// Multiply by 16 to turn 8-bit into the new 16-bit format
				pLowPassRow[column] = pixel_value * 16;
			}
			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}
#if ERROR_TOLERANT
		// Update the count of bytes used
		stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
		// Update the bitstream
		stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
	}
	else
	{
		// Generic path: read each pixel with GetBits()
		int channeloffset = 0;
		if(decoder->codec.precision == 8)
		{
			channeloffset = (codec->num_frames==2 ? 64 : 32);
		}
		else if(decoder->codec.precision == 10)
		{
			channeloffset = (codec->num_frames==2 ? 10 : 5);
		}
		else if(decoder->codec.precision == 12)
		{
			// channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
		}
		//DAN20050923 no longer trying to compensate for YUV to RGB issues.
		if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32)
		{
			if(decoder->codec.precision == 8)
			{
				switch(channel)
				{
				case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += 16; break;
				case 2: channeloffset += 10; break;
				}
			}
			else if(decoder->codec.precision == 10)
			{
				switch(channel)
				{
				case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += -4; break;
				case 2: channeloffset += -4; break;
				}
			}
			else if(decoder->codec.precision == 12)
			{
				switch(channel)
				{
				case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += 0; break;
				case 2: channeloffset += 0; break;
				}
			}
		}
		if(bits_per_pixel != 16)
			channeloffset = 0;
		for (row = 0; row < lowpass_height; row++)
		{
			for (column = 0; column < lowpass_width; column++) {
				int pixel_value = GetBits(stream, bits_per_pixel);
				// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
				if (channel == 0)
					pixel_value = (quantization * pixel_value) + offset;
				else
					pixel_value = (pixel_value - offset) * quantization;
#else
				pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
				// Store the pixel in the lowpass band of the wavelet
				pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes
			}
			stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3);
			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}
	}
	// Set the wavelet scale factor
	wavelet->scale[0] = quantization;
	// Align the bitstream to the next tag value pair
	AlignBitsTag(stream);
	// Return indication of lowpass decoding success
	return result;
}
// Decode the coefficients in a highpass band
// Decode the coefficients of one highpass band into the wavelet.
//
// Dispatches on the band encoding method recorded in the codec state:
//   BAND_ENCODING_LOSSLESS   -> FSM lossless decode (DecodeBand16sLossless)
//   BAND_ENCODING_16BIT      -> raw 16-bit words (DecodeBand16s)
//   otherwise                -> run-length/FSM decode (DecodeFastRunsFSM16s)
// Copies the encoder's scale and quantization factors into the wavelet,
// then validates the band trailer. Returns true on success; on a trailer
// error decoder->error is set and false is returned.
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int index = codec->highpass.wavelet_number;
	int width;
	int height;
	int quantization;
	// The encoder may not have used variable-length coding
	int method = codec->band.encoding;
	bool result = true;
	// Check that the band index is in range
	assert(0 <= band && band <= codec->max_subband);
	// Encoded coefficients start on a tag boundary
	AlignBitsTag(stream);
#if (0 && DEBUG)
	// Dump the band header to the logfile
	if (logfile) {
		fprintf(logfile,
			"Band header marker: 0x%04X, subband: %d, width: %d, height: %d, encoding: %d\n",
			header->marker, header->subband, header->width, header->height, header->encoding);
	}
#endif
	// Copy the scale factors used by the encoder into the wavelet band
	// (Zero means that the encoder did not supply this parameter)
	if (codec->band.scale > 0) {
		wavelet->scale[band] = codec->band.scale;
	}
	// Get the quantization factor that was used to encode the band coefficients
	quantization = codec->band.quantization;
	// Copy the quantization into the wavelet
	wavelet->quantization[band] = quantization;
#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decode highpass subband: %d, quantization: %d\n", subband, quantization);
	}
#endif
	// Get the highpass band dimensions
	width = codec->band.width;
	height = codec->band.height;
	// Is this a special band for the temporal high pass thumbnail?
	if (method == BAND_ENCODING_LOSSLESS)
	{
		//lossless temporal subband //DAN20060701
		result = DecodeBand16sLossless(decoder, stream, wavelet, band, width, height);
		assert(result);
		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
	}
	else if (method == BAND_ENCODING_16BIT)
	{
		//lossless temporal subband //DAN20060701
		result = DecodeBand16s(decoder, stream, wavelet, band, width, height);
		assert(result);
		if (result)
		{
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
	}
	else
	{
		// Must use the runlength encoding method
		assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);
#if 0
		// This code attempts to not decode various subbands for 1/4 res decodes.
		// Unfortunately playback would stop after 5 seconds with this code (but not in debug mode.)
		if (subband >= 4 && subband <= 6)
		{
			TAGVALUE segment;
			AlignBitsTag(stream);
			do
			{
				segment = GetTagValue(stream);
			}
			while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
			stream->lpCurrentWord -= 4;
			stream->nWordsUsed += 4;
		}
		else
#elif 0
		// Is this subband required for decoding the frame?
		if (CanSkipSubband(decoder, subband))
		{
			// Skip past the end of this subband
			SkipSubband(stream);
		}
#endif
		// Decode this subband
		result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, width, height, threading);
	}
	// Return failure if a problem was encountered while reading the band coefficients
	if (!result) return result;
	// The encoded band coefficients end on a bitstream word boundary
	// to avoid interference with the marker for the coefficient band trailer
	AlignBits(stream);
	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error = error;
	assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Error in band %d trailer: %d\n", band, error);
		}
#endif
		return false;
	}
	return result;
}
// Decode an empty band
// Decode an empty band: no coefficients are present in the bitstream, so this
// only records the encoder's scale and quantization factors in the wavelet and
// validates the band trailer. Returns true on success; on a trailer error
// decoder->error is set and false is returned.
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int quantization;
	// Check that the band is in range
	assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);
	// Check that the highpass band is 16 bits
	assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);
#if (0 && DEBUG)
	//TODO: Change format string to handle 64-bit pointers
	if (logfile) {
		fprintf(logfile, "Start decoding an empty band, stream: 0x%p\n", stream->lpCurrentWord);
	}
#endif
	// Encoded coefficients must start on a word boundary
	AlignBits(stream);
	// Copy the scale factors used by the encoder into the wavelet band
	// (Zero means that the encoder did not supply the parameter)
	if (codec->band.scale > 0)
		wavelet->scale[band] = codec->band.scale;
	// Set the quantization used to encode the band coefficients
	quantization = codec->band.quantization;
	wavelet->quantization[band] = quantization;
#if (0 && DEBUG)
	if (logfile) {
		DumpBits(stream, logfile);
	}
#endif
	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error = error;
	assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Error in band: %d, error: %d\n", band, error);
		}
#endif
		return false;
	}
	// The encoded band coefficients end on a bitstream word boundary
	// to avoid interference with the marker for the coefficient band trailer
	AlignBits(stream);
#if (0 && DEBUG)
	// Dump the band trailer to the logfile
	if (logfile) {
		fprintf(logfile, "Band trailer marker: 0x%04X\n", trailer->marker);
	}
#endif
#if (0 && DEBUG)
	if (logfile) {
		//TODO: Change format string to handle 64-bit pointers
		fprintf(logfile, "End decode empty band, stream: 0x%X\n", stream->lpCurrentWord);
	}
#endif
	return true;
}
// Read a band of 16-bit signed coefficients directly from the bitstream.
// The coefficients are stored big-endian in the stream; each decoded value is
// multiplied by the band's quantization factor as it is written into the band.
// Always returns true.
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
				   int band_index, int width, int height)
{
	PIXEL *output = wavelet->band[band_index];
	int dequant = wavelet->quantization[band_index];
	int pitch = wavelet->pitch / sizeof(PIXEL);		// Band pitch in pixels, not bytes
	int row;

	if (dequant == 1)
	{
		// No dequantization needed: swap the bytes of each big-endian 16-bit
		// word straight into the band (mild speedup over calling GetWord16s)
		for (row = 0; row < height; row++)
		{
			char *src = (char *)stream->lpCurrentWord;
			char *dst = (char *)output;
			int column;

			for (column = 0; column < width; column++)
			{
				dst[1] = *src++;
				dst[0] = *src++;
				dst += 2;
			}

			// Account for the bytes consumed from the bitstream
			stream->lpCurrentWord += width * 2;
			stream->nWordsUsed += width * 2;

			output += pitch;
		}
	}
	else
	{
		// Read each coefficient from the bitstream and dequantize it
		for (row = 0; row < height; row++)
		{
			int column;

			for (column = 0; column < width; column++)
			{
				output[column] = GetWord16s(stream) * dequant;
			}

			output += pitch;
		}
	}

	return true;
}
// Decode a band that was encoded losslessly using the finite state machine
// selected by the currently active codebook, then dequantize in place.
// The FSM cannot apply the quantization itself because the coefficients are
// split into high and low bytes, so decoding and dequantization are separate
// passes. Resets the active codebook and difference-coding state for the
// next subband. Returns true on success; on any failure decoder->error is
// set to CODEC_ERROR_RUN_DECODE and false is returned.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
						   int band_index, int width, int height)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int result = true;
	int quant = wavelet->quantization[band_index];
	// Finite state machine selected by the currently active codebook
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
	int size;
	PIXEL *rowptr;
	int pitch;

	// Reset the codec state for the next subband
	decoder->codec.active_codebook = 0;
	decoder->codec.difference_coding = 0;

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (wavelet == NULL) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Must have a valid finite state machine
	assert(fsm != NULL);
	if (fsm == NULL) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// All rows are treated as one long row that covers the entire band
	size = fsm->table.num_states;
	assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// The band must hold 16-bit signed pixels
	assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

	rowptr = (PIXEL *)wavelet->band[band_index];
	pitch = wavelet->pitch;
	assert(rowptr != NULL && pitch != 0);
	if (rowptr == NULL || pitch == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Disable dequantization inside the FSM (the coefficients are split into
	// high and low bytes); the quantization factor is applied below instead
	DeQuantFSM(fsm, 1);
	if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Dequantize the decoded coefficients in place
	if (quant)
	{
		int x, y;
		PIXEL *line = rowptr;
		// Multiply instead of the old "line[x] <<= 5" special case for
		// quant == 32: left-shifting a negative signed value is undefined
		// behavior in C, and the compiler emits a shift for a constant
		// power-of-two multiply anyway
		for (y = 0; y < height; y++)
		{
			for (x = 0; x < width; x++)
			{
				line[x] *= quant;
			}
			line += pitch / 2;		// pitch is in bytes; PIXEL values are 16-bit
		}
	}

	assert(result == true);
	if (!result) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	return true;
}
// Invert the wavelet to reconstruct the lower wavelet in the transform
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel,
IMAGE *wavelet, int index, int precision,
const SCRATCH *scratch, int allocations_only)
{
int transform_type = transform->type;
int width = wavelet->width;
int height = wavelet->height;
int level = wavelet->level;
PIXEL *buffer = (PIXEL *)scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Is the current wavelet a spatial wavelet?
if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
{
// Reconstruct the lowpass band in the lower wavelet
int lowpass_index = index - 1;
IMAGE *lowpass = transform->wavelet[lowpass_index];
int lowpass_width = 2 * width;
int lowpass_height = 2 * height;
int lowpass_level = level - 1;
int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
//const int prescale = 1;
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = transform->prescale[index];
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
lowpass_width, lowpass_height,
lowpass_level, lowpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
transform->wavelet[lowpass_index] = lowpass;
#endif
// Check that the lowpass band has not already been reconstructed
//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
if(!allocations_only)
{
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Has this wavelet already been reconstructed?
if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
{
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
}
// Is the current wavelet a spatial wavelet above the temporal lowpass band?
else if (index > 3)
{
// Reconstruct the lowpass band in the lower wavelet
const int temporal_wavelet_index = 2;
int lowpass_index = (index > 4) ? index - 1 : index - 2;
IMAGE *lowpass = transform->wavelet[lowpass_index];
int lowpass_width = 2 * width;
int lowpass_height = 2 * height;
int lowpass_level = level - 1;
int lowpass_type = ((lowpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
//const int prescale = 2;
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = transform->prescale[index];
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
lowpass_width, lowpass_height,
lowpass_level, lowpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
transform->wavelet[lowpass_index] = lowpass;
#endif
if(!allocations_only)
{
// Check that the lowpass band has not already been reconstructed
assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
// Is the current wavelet the spatial wavelet above the temporal highpass band?
else if (index == 3)
{
// Reconstruct the highpass band in the temporal wavelet
const int temporal_wavelet_index = 2;
int highpass_index = index - 1;
IMAGE *highpass = transform->wavelet[highpass_index];
int highpass_width = 2 * width;
int highpass_height = 2 * height;
int highpass_level = level - 1;
int highpass_type = ((highpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
int prescale = inverse_prescale ? transform->prescale[index] : 0;
#if _THREADED_DECODER
// Allocate (or reallocate) the wavelet with thread safety
highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
highpass_width, highpass_height,
highpass_level, highpass_type);
#else
// Allocate the wavelet if not already allocated
#if _ALLOCATOR
highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
transform->wavelet[highpass_index] = highpass;
#endif
if(!allocations_only)
{
// Check that the highpass band has not already been reconstructed
assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(wavelet));
// Perform the inverse spatial transform before decoding the next wavelet
STOP(tk_decoding);
START(tk_inverse);
TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
STOP(tk_inverse);
START(tk_decoding);
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, highpass, 1);
#if TIMING
// Increment the count of spatial transforms performed during decoding
spatial_decoding_count++;
#endif
}
}
// Is the current wavelet the temporal wavelet?
else if (index == 2)
{
// Get the temporal wavelet
IMAGE *temporal = wavelet;
// Set the frame wavelet parameters
int frame_level = 1;
int frame_type = WAVELET_TYPE_FRAME;
// Get the two frame wavelets
IMAGE *frame[2];
frame[0] = transform->wavelet[0];
frame[1] = transform->wavelet[1];
// Check that the temporal wavelet is valid
assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);
#if _THREADED_DECODER
// Allocate (or reallocate) the frame wavelets with thread safety
frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
transform->wavelet[0] = frame[0];
transform->wavelet[1] = frame[1];
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Before inverse temporal transform");
DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
}
#endif
if(!allocations_only)
{
// Check that the lowpass bands have not already been reconstructed
assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
// Check that all of the wavelet bands have been decoded
assert(BANDS_ALL_VALID(temporal));
// Invert the temporal transform between the frame wavelets
STOP(tk_decoding);
START(tk_inverse);
TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
STOP(tk_inverse);
START(tk_decoding);
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = quad[0];
fprintf(logfile, "After inverse temporal transform\n");
DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, frame[0], 0);
UpdateWaveletBandValidFlags(decoder, frame[1], 0);
#if TIMING
// Increment the number of temporal transforms performed outside of decoding
temporal_decoding_count++;
#endif
}
}
}
// Compute the dimensions of the output buffer
//
// Determines the decoded frame width and height for the resolution
// requested in decoder->frame by selecting the wavelet whose lowpass
// band will be used for reconstruction and scaling its dimensions.
//
// Parameters:
//   decoder            - decoder state (transforms must already be decoded)
//   frame              - frame index within the group (used by debug asserts)
//   decoded_width_out  - receives the decoded width (zero on failure)
//   decoded_height_out - receives the decoded height (zero on failure)
//
// The output arguments are cleared to zero on entry so that callers can
// detect an early return (bad arguments or missing wavelet).
void ComputeOutputDimensions(DECODER *decoder, int frame,
							 int *decoded_width_out, int *decoded_height_out)
{
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	TRANSFORM **transform_array = decoder->transform;
	IMAGE *wavelet = NULL;
	int wavelet_width;
	int wavelet_height;
	int decoded_width;
	int decoded_height;
	int resolution = info->resolution;

	// Scale factor applied to the wavelet dimensions (set per resolution below)
	int decoded_scale = 0;

	if (decoded_width_out == NULL || decoded_height_out == NULL) {
		return;
	}

	// Clear the return values in case this routine terminates early
	*decoded_width_out = 0;
	*decoded_height_out = 0;

	// Select the wavelet and the decoding scale for the requested resolution
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
		assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF:
#if DEBUG
		assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
		{
#if DEBUG
			assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[0];
		}
		else
		{
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[3];
		}
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[5];
		if (wavelet == NULL) {
			// Fall back to wavelet[2] when the sample was intra-frame compressed
			wavelet = transform_array[0]->wavelet[2];
		}
		break;

	default:
		assert(0);
		break;
	}

	// Get the decoded frame dimensions
	assert(wavelet != NULL);
	if (wavelet == NULL) {
		// Defensive check: the assert above is compiled out in release builds
		// (and the default case leaves wavelet null), so avoid dereferencing
		// a null pointer and leave the output dimensions at zero.
		return;
	}

	wavelet_width = wavelet->width;
	wavelet_height = wavelet->height;

	// Half-horizontal output keeps the wavelet width; all other resolutions
	// scale the wavelet width by the decoding scale
	if (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		decoded_width = wavelet_width;
	else
		decoded_width = decoded_scale * wavelet_width;
	decoded_height = decoded_scale * wavelet_height;

	// Return the decoded width and height
	*decoded_width_out = decoded_width;
	*decoded_height_out = decoded_height;
}
#define DEBUG_ROW16U 0
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
FRAME_INFO local_info;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &local_info;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
TRANSFORM **transform_array = decoder->transform;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
IMAGE *wavelet;
int wavelet_width;
int wavelet_height;
int decoded_width;
int decoded_height;
int resolution = decoder->frame.resolution;
int chroma_offset = decoder->codec.chroma_offset;
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
//TODO: Change this routine to return the codec error code
CODEC_ERROR error = CODEC_ERROR_OKAY;
//if(decoder->cfhddata.calibration)
// LoadTweak();
//TODO: Change this routine to return an error code
if (decoder == NULL) {
return;
}
decoder->gop_frame_num = frame;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
//return;
// copy frame info in a changable local structure
memcpy(info, &decoder->frame, sizeof(FRAME_INFO));
// Use the old code for reconstructing the frame
#if (0 && DEBUG)
// Force quarter resolution decoding for debugging that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame);
}
#endif
// The decoder can decode a video sample without returning a frame
if (output == NULL || pitch == 0) return;
#if (1 && DEBUG_ROW16U)
// Force decoding to 16-bit pixels for debugging
info->format = DECODED_FORMAT_YR16;
#endif
#if 0
if (info->format == DECODED_FORMAT_YR16)
{
// Force interlaced or progressive decoding for debugging
//progressive = false;
progressive = true;
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags);
}
#endif
// Does this frame have to be reconstructed?
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoder discarding frame: %d\n", frame);
}
#endif
return;
}
// Check that the requested frame is within the limits of the group of frames
assert(0 <= frame && frame < decoder->gop_length);
// Check that the frame resolution is valid
assert(IsValidFrameResolution(resolution));
if (!IsValidFrameResolution(resolution)) {
decoder->error = CODEC_ERROR_RESOLUTION;
return;
}
#if (0 && TIMING) //(0 && DEBUG)
// Override progressive flag read from the bitstream for debugging
//progressive = 0; // Use the inverse frame transform
progressive = 1; // Use the inverse spatial transform
#endif
// Build the 3D LUTs if needed
ComputeCube(decoder);
//HACK DAN20110131 -- some formats will not directly decode so need to use the AM route
{
if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
resolution == DECODED_RESOLUTION_HALF)
{
if( decoder->frame.format == COLOR_FORMAT_R408 ||
decoder->frame.format == COLOR_FORMAT_V408)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
if( decoder->frame.format == COLOR_FORMAT_NV12)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true; // TODO, make it work with this.
}
if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
// Get the decoding scale
if(!uncompressed)
{
switch(resolution)
{
case DECODED_RESOLUTION_FULL:
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = 2 * wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_HALF:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_QUARTER:
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
}
else
{
wavelet = transform_array[0]->wavelet[3];
}
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
wavelet = transform_array[0]->wavelet[5];
if(wavelet == NULL) // there Intra Frame compressed
wavelet = transform_array[0]->wavelet[2];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
default:
assert(0);
break;
}
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
decoded_width = info->width/2;
decoded_height = info->height/2;
}
else
{
decoded_width = info->width;
decoded_height = info->height;
}
}
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
if(resolution == DECODED_RESOLUTION_FULL)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
}
}
else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
if(decoded_width*2 == info->width)
{
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
}
else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4)
{
if(decoded_width*2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER;
}
}
else
{
if(resolution == DECODED_RESOLUTION_HALF)
{
if(decoded_width*2 == info->width)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL;
}
}
else if(resolution == DECODED_RESOLUTION_QUARTER)
{
if(uncompressed)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED;
}
else
{
if(decoded_width == info->width)
{
info->resolution = resolution = DECODED_RESOLUTION_HALF;
}
}
}
}
}
if(uncompressed)
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210)
error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0)
error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
else
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4
case ENCODED_FORMAT_RGBA_4444: // Four planes of ARGB 4:4:4:4
error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
//error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2
// Add new code here for the final steps in decoding the original YUV 4:2:2 format
error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch);
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
// Was the newer code able to successfully reconstruct the frame?
if (error != CODEC_ERROR_UNSUPPORTED_FORMAT)
{
// Save the codec error code in the decoder state and return
decoder->error = error;
return;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n",
decoded_width, decoded_height, info->width, info->height, pitch);
}
#endif
#if (0 && DEBUG)
if (logfile) {
IMAGE *wavelet = transform[0]->wavelet[frame];
int band = 0;
fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
//if (! (info->width >= decoded_width))
{
if (logfile) {
//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
}
}
#endif
assert(info->width >= decoded_width);
assert((info->height+7)/8 >= (decoded_height+7)/8);
if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) {
decoder->error = CODEC_ERROR_FRAMESIZE;
return;
}
#if (0 && DEBUG)
if (logfile) {
//SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16);
SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16);
// Adjust the subimage to be at the middle of the right border
//subimage.row += wavelet_height/2 - 8;
DumpBand("SIF Image", wavelet, 0, &subimage, logfile);
}
#endif
START(tk_inverse);
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int precision = codec->precision;
// Reconstruct the frame to quarter resolution
ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
info, &decoder->scratch, precision);
}
else
// Was the first transform a frame transform (used for interlaced frames)?
if (!progressive)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY))
{
// Apply the inverse frame transform and pack the results into the output buffer
int precision = codec->precision;
#if (0 && DEBUG)
DumpWaveletBandsPGM(wavelet, frame, num_channels);
#endif
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
int precision = codec->precision;
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int scale = 13;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
int precision = codec->precision;
TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
}
}
}
else // The first transform was a spatial transform (used for progressive frames)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
uint8_t *pixoutput = output;
if(decoder->use_active_metadata_decoder) //WIP
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToBayerYUV);
}
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
}
#else
//TODO : Accelerated BAYER for single thread decoding.
assert(0);
// Transform the wavelets for each channel to the output image (not threaded)
//TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info,
// &decoder->scratch, chroma_offset, precision);
#endif
}
else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER &&
(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
{
uint8_t *pixoutput = output;
if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
{
pixoutput += (info->height-1)*pitch;
pitch = -pitch;
}
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
int precision = codec->precision;
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
//int precision = codec->precision;
int scale = 13;
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
char *format = decoded_format_string[info->format];
sprintf(label, "Output, channel: %d, format: %s", channel, format);
DumpImageStatistics(label, lowpass_images[channel], logfile);
}
#endif
}
STOP(tk_inverse);
#if 1 //|| BAYER_SUPPORT
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[4096*3],*sptr;
//unsigned short scanline2[4096*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
IMAGE *gd_image = lowpass_images[3];
uint8_t *outyuv,*line = output;
PIXEL *bayer_line, *bayerptr;
PIXEL *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
int format = info->format;
bool inverted = false;
int maxbound = 4095; //10-bit source
int midpoint = 32768>>3;
int shift = 4;
if(precision == 12)
{
maxbound = 16383;
midpoint = 32768>>1;
shift = 2;
}
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32)
{
inverted = true;
line += (info->height-1)*pitch;
pitch = -pitch;
}
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
for(y=0; y<info->height; y++)
{
uint8_t *newline = line;
PIXEL *newG=G,*newRG=RG,*newBG=BG;
PIXEL *gptr,*rgptr,*bgptr,*gdptr;
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
newline += pitch*y;
newG += y * (g_image->pitch / sizeof(PIXEL));
newRG += y * (rg_image->pitch / sizeof(PIXEL));
newBG += y * (bg_image->pitch / sizeof(PIXEL));
gptr = newG;
rgptr = newRG;
bgptr = newBG;
sptr = scanline;
for(x=0; x<info->width; x++)
{
g = (*gptr++);
if(g > maxbound) g = maxbound;
rg = (*rgptr++);
bg = (*bgptr++);
r = (rg<<1) - midpoint + g;
b = (bg<<1) - midpoint + g;
if(r > maxbound) r = maxbound;
if(b > maxbound) b = maxbound;
if(r < 0) r = 0;
if(g < 0) g = 0;
if(b < 0) b = 0;
*sptr++ = r<<shift;
*sptr++ = g<<shift;
*sptr++ = b<<shift;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr,
newline, y, pitch,
info->format, whitebitdepth, flags);
}
}
#endif
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
uint8_t *line = output;
unsigned char *rgb8;
PIXEL *G,*RG,*BG;
int x,y;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
if(info->format == DECODED_FORMAT_RGB32)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
*rgb8++ = 255;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
line = output;
line += (info->height-1) * pitch;
for(y=0; y<info->height; y++)
{
PIXEL *gptr,*rgptr,*bgptr;
int r,g,b;
int i,noisearray[32];
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for(x=0; x<info->width; x++)
{
int rnd = noisearray[x&31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if(r < 0) r=0; if(r > 255) r=255;
if(g < 0) g=0; if(g > 255) g=255;
if(b < 0) b=0; if(b > 255) b=255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
}
else
#endif
{
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
START(tk_inverse);
#if (0 && DEBUG)
if (logfile) {
char label[_MAX_PATH];
int width = info->width;
int height = info->height;
sprintf(label, "Output");
DumpBufferStatistics(label, output, width, height, pitch, logfile);
}
#endif
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
// Handle inversion of the output image in this routine
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER)
height *= 2;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
//#if BUILD_PROSPECT
// Output the frame in V210 foramt?
if( (format == DECODED_FORMAT_V210 ||
format == DECODED_FORMAT_YU64) &&
decoder->codec.encoded_format != ENCODED_FORMAT_BAYER )
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// The output buffer is an array of 10-bit pixels packed into double words
#if 0
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2,
buffer, buffer_size, chroma_offset, decoder->codec.precision);
#else
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
else
//#endif
// Decoding a full resolution progressive frame to a Bayer output format?
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL);
}
//TODO: Replace this memory allocation with a scratch buffer allocation
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*decoded_height*4*3*sizeof(PIXEL);
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16);
#endif
decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL);
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
if(decoder->RawBayer16)
{
uint8_t *line;
PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16;
PIXEL16U *G,*RG,*BG,*GD;
int x,y;
int bayer_pitch = info->width*4;
//float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
#if 0
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5f}
};
float mtrx[3][4] =
{
{1.0f, 0, 0, 0},
{0, 1.0f, 0, 0},
{0, 0, 1.0f, 0}
};
float whitebalance[3] = { 1.0f, 1.0f, 1.0f };
#endif
#if 0 // Matrix disabled as it can only be correctly handled by the 3D LUT due to the required linear conversions
/* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2)
{
float fval = 0.0;
int i;
for(i=0; i<12; i++)
{
mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3];
if((i>>2) == (i&3))
{
if(fval != 1.0)
{
matrix_non_unity = 1;
}
}
else
{
if(fval != 0.0)
{
matrix_non_unity = 1;
}
}
}
// not active as VFW doesn't yet support the 3D LUTs
if(decoder->cfhddata.version >= 5)
{
int j;
float encode_curvebase = 90.0;
float decode_curvebase = 90.0;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
int decode_curve_type = decoder->cfhddata.decode_curve >> 16;
if(decoder->cfhddata.user_white_balance[0] > 0.0)
{
wb_non_unity = 1;
whitebalance[0] = decoder->cfhddata.user_white_balance[0];
whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0;
whitebalance[2] = decoder->cfhddata.user_white_balance[3];
}
if(encode_curve_type) //1 or 2
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
else
{
encode_curve_type = 1;
encode_curvebase = 90.0;
}
if(decode_curve_type) //1 or 2
decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.decode_curve & 0xff);
else
{
decode_curve_type = 1;
decode_curvebase = 90.0;
}
for(j=0; j<2048; j++)
{
if(encode_curve_type == 1)
curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase);
else
curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase);
}
for(j=-512; j<=2048; j++) // -1 to +4
{
if(encode_curve_type == CURVE_TYPE_LOG)
lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase);
else
lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase);
}
}
}*/
#endif
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
info, chroma_offset, precision);
#else
// Decode that last transform to rows of Bayer data (one row per channel)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
&decoder->scratch, chroma_offset, precision);
#endif
if(resolution == DECODED_RESOLUTION_FULL_DEBAYER &&
(info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4))
{
#if _THREADED //DemosaicRAW
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* int bayer_format = decoder->cfhddata.bayer_format;
unsigned char *outA8, *outB8;
unsigned short *lineStartA16, *lineStartB16;
unsigned short *lineA16, *lineB16;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++)
{
bayer_line = decoder->RawBayer16;
bayer_line += bayer_pitch * y;
if(y<info->height)
{
ColorDifference2Bayer(info->width,
bayer_line, bayer_pitch, bayer_format);
}
if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline
{
unsigned short *delayptr = decoder->RawBayer16;
delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES);
BayerRippleFilter(info->width,
delayptr, bayer_pitch, bayer_format, decoder->RawBayer16);
}
if(y>=DEMOSAIC_DELAYLINES)
{
int delay_y = y - DEMOSAIC_DELAYLINES;
unsigned short *sptr, scanline[8192*3];
outA8 = line;
line += pitch;
outB8 = line;
line += pitch;
sptr = scanline;
DebayerLine(info->width*2, info->height*2, delay_y*2,
decoder->RawBayer16, bayer_format, sptr, sharpening);
for(x=0; x<info->width*2; x++)
{
outA8[2] = *sptr++>>8;
outA8[1] = *sptr++>>8;
outA8[0] = *sptr++>>8;
outA8+=3;
}
for(x=0; x<info->width*2; x++)
{
outB8[2] = *sptr++>>8;
outB8[1] = *sptr++>>8;
outB8[0] = *sptr++>>8;
outB8+=3;
}
}
}*/
#endif // _THREADED
}
else
if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* {
int bayer_format = decoder->cfhddata.bayer_format;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
int bayer_format = decoder->cfhddata.bayer_format;
for(y=2; y<info->height-3; y++)
{
int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
}*/
#endif
}
// Pack the rows of Bayer data (full resolution progressive) into BYR3 format?
else if (format == DECODED_FORMAT_BYR3)
{
PIXEL16U *outR, *outG1, *outG2, *outB;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
// #pragma omp parallel for
for(y=0; y<info->height; y++)
{
uint8_t *line = output;
PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16;
line += pitch*2*y;
bayerptr += bayer_pitch * y;
outR = (PIXEL16U *)line;
outG1 = outR + (pitch/4);
outG2 = outR + (pitch/4)*2;
outB = outR + (pitch/4)*3;
G = (PIXEL16U *)bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
// Pack the rows of Bayer components into the BYR3 pattern
#if (1 && XMMOPT)
{
__m128i *G_128 = (__m128i *)G;
__m128i *RG_128 = (__m128i *)RG;
__m128i *BG_128 = (__m128i *)BG;
__m128i *GD_128 = (__m128i *)GD;
__m128i *outR_128 = (__m128i *)outR;
__m128i *outG1_128 = (__m128i *)outG1;
__m128i *outG2_128 = (__m128i *)outG2;
__m128i *outB_128 = (__m128i *)outB;
__m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff);
__m128i midpoint1 = _mm_set1_epi16(32768>>6);
__m128i midpoint2 = _mm_set1_epi16(32768>>5);
int column_step = 8;
int post_column = (info->width) - ((info->width) % column_step);
for (x=0; x < post_column; x += column_step)
{
__m128i r_128;
__m128i g1_128;
__m128i g2_128;
__m128i b_128;
__m128i g_128;
__m128i rg_128;
__m128i bg_128;
__m128i gd_128;
g_128 = _mm_load_si128(G_128++);
rg_128 = _mm_load_si128(RG_128++);
bg_128 = _mm_load_si128(BG_128++);
gd_128 = _mm_load_si128(GD_128++);
g_128 = _mm_srli_epi16(g_128, 6);
rg_128 = _mm_srli_epi16(rg_128, 5);
bg_128 = _mm_srli_epi16(bg_128, 5);
gd_128 = _mm_srli_epi16(gd_128, 6);
gd_128 = _mm_subs_epi16(gd_128, midpoint1);
rg_128 = _mm_subs_epi16(rg_128, midpoint2);
bg_128 = _mm_subs_epi16(bg_128, midpoint2);
r_128 = _mm_adds_epi16(rg_128, g_128);
b_128 = _mm_adds_epi16(bg_128, g_128);
g1_128 = _mm_adds_epi16(g_128, gd_128);
g2_128 = _mm_subs_epi16(g_128, gd_128);
r_128 = _mm_adds_epi16(r_128, limiter);
r_128 = _mm_subs_epu16(r_128, limiter);
g1_128 = _mm_adds_epi16(g1_128, limiter);
g1_128 = _mm_subs_epu16(g1_128, limiter);
g2_128 = _mm_adds_epi16(g2_128, limiter);
g2_128 = _mm_subs_epu16(g2_128, limiter);
b_128 = _mm_adds_epi16(b_128, limiter);
b_128 = _mm_subs_epu16(b_128, limiter);
_mm_store_si128(outR_128++, r_128);
_mm_store_si128(outG1_128++, g1_128);
_mm_store_si128(outG2_128++, g2_128);
_mm_store_si128(outB_128++, b_128);
}
G = (PIXEL16U *)G_128;
RG = (PIXEL16U *)RG_128;
BG = (PIXEL16U *)BG_128;
GD = (PIXEL16U *)GD_128;
outR = (PIXEL16U *)outR_128;
outG1 = (PIXEL16U *)outG1_128;
outG2 = (PIXEL16U *)outG2_128;
outB = (PIXEL16U *)outB_128;
}
#endif
for(; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
//Red-grn phase
*outR++ = r>>6;
*outG1++ = g1>>6;
*outG2++ = g2>>6;
*outB++ = b>>6;
}
}
}
// Pack the rows of Bayer data (full resolution progressive) into BYR4 format?
else if (format == DECODED_FORMAT_BYR4)
{
int bayer_format = decoder->cfhddata.bayer_format;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
//int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
int32_t r, g, b, rg, bg, gd, g1, g2;
// The output of the inverse transform is unsigned 16-bit integers
const int midpoint = 32768;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - midpoint;
r = ((rg - midpoint)<<1) + g;
b = ((bg - midpoint)<<1) + g;
g1 = g + gd;
g2 = g - gd;
r = SATURATE_16U(r);
g1 = SATURATE_16U(g1);
g2 = SATURATE_16U(g2);
b = SATURATE_16U(b);
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
default:
// Unsupported Bayer format
assert(0);
*outA16++ = 0;
*outA16++ = 0;
*outB16++ = 0;
*outB16++ = 0;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
for(y=2; y<info->height-3; y++)
{
//int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
// Linear restore
{
unsigned short *buff = (unsigned short *)output;
//static int pos = 0;
for(y=0; y<info->height*2; y++)
{
for(x=0; x<info->width*2; x++)
{
float val = (float)buff[y*info->width*2 + x]/65535.0f;
float encode_curvebase = 90.0;
int encode_curve_type = CURVE_TYPE_LOG;
int encode_curve_neg;
if((decoder->cfhddata.encode_curve)>>16) //1 or 2
{
encode_curve_type = (decoder->cfhddata.encode_curve)>>16;
if(encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR)
encode_curve_type = CURVE_TYPE_LINEAR;
encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
switch(encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = CURVE_LOG2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_GAMMA:
val = CURVE_GAM2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINEON:
val = CURVE_CINEON2LIN(val,encode_curvebase);
break;
case CURVE_TYPE_CINE985:
val = CURVE_CINE9852LIN(val,encode_curvebase);
break;
case CURVE_TYPE_PARA:
val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff));
break;
case CURVE_TYPE_CSTYLE:
val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff));
break;
case CURVE_TYPE_SLOG:
val = CURVE_SLOG2LIN((float)val);
break;
case CURVE_TYPE_LOGC:
val = CURVE_LOGC2LIN((float)val);
break;
case CURVE_TYPE_LINEAR:
default:
break;
}
buff[y*info->width*2 + x] = (int)(val*4095.0);
}
}
}
}
else
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[8192*3],*sptr;
//unsigned short scanline2[8192*3],*sptr2;
unsigned short *scanline,*sptr;
unsigned short *scanline2,*sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
uint8_t *outyuv,*line = output;
PIXEL *bayerptr;
int x,y;
if(buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int r,g,b,rg,bg,y1,y2,u,v;
int r1,g1,b1;
int i;
__m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg;
__m128i rrrrrrrr,bbbbbbbb;
__m128i mid8192 = _mm_set1_epi16(8192);
__m128i mid16384 = _mm_set1_epi16(16384);
__m128i mid32768 = _mm_set1_epi16(32768);
__m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff);
int sse2width = info->width & 0xfff8;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
sptr = scanline;
x = 0;
for(; x<sse2width; x+=8)
{
gggggggg = _mm_loadu_si128((__m128i *)G); G+=8;
rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8;
bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8;
ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned
rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned
bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned
rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767
bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767
//limit to 0 to 16383
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16);
rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16);
//limit to 0 to 16383
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16);
bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16);
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535
*sptr++ = _mm_extract_epi16(rrrrrrrr, 0);
*sptr++ = _mm_extract_epi16(gggggggg, 0);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 0);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 1);
*sptr++ = _mm_extract_epi16(gggggggg, 1);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 1);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 2);
*sptr++ = _mm_extract_epi16(gggggggg, 2);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 2);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 3);
*sptr++ = _mm_extract_epi16(gggggggg, 3);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 3);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 4);
*sptr++ = _mm_extract_epi16(gggggggg, 4);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 4);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 5);
*sptr++ = _mm_extract_epi16(gggggggg, 5);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 5);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 6);
*sptr++ = _mm_extract_epi16(gggggggg, 6);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 6);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 7);
*sptr++ = _mm_extract_epi16(gggggggg, 7);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 7);
}
for(; x<info->width; x++)
{
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
if(r < 0) r = 0; if(r > 0xffff) r = 0xffff;
if(g < 0) g = 0; if(g > 0xffff) g = 0xffff;
if(b < 0) b = 0; if(b > 0xffff) b = 0xffff;
*sptr++ = r;
*sptr++ = g;
*sptr++ = b;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if(decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch,
info->format, whitebitdepth, flags);
}
line += pitch;
bayer_line += bayer_pitch;
}
#endif
}
/* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201
// Pack the rows of Bayer data (full resolution progressive) into BYR2 format?
else if (format == DECODED_FORMAT_YUYV)
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale);
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
bayer_line = decoder->RawBayer16;
scale = 16384.0;
//_mm_empty(); // Clear the mmx register state
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale * 4.0);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale * 4.0);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale * 4.0);
scale = 4096.0;
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else //RGBs
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
r_rmult = (mtrx[0][0]) * scale * whitebalance[0];
r_gmult = (mtrx[0][1]) * scale * whitebalance[1];
r_bmult = (mtrx[0][2]) * scale * whitebalance[2];
r_offset= (mtrx[0][3]) * scale;
g_rmult = (mtrx[1][0]) * scale * whitebalance[0];
g_gmult = (mtrx[1][1]) * scale * whitebalance[1];
g_bmult = (mtrx[1][2]) * scale * whitebalance[2];
g_offset= (mtrx[1][3]) * scale;
b_rmult = (mtrx[2][0]) * scale * whitebalance[0];
b_gmult = (mtrx[2][1]) * scale * whitebalance[1];
b_bmult = (mtrx[2][2]) * scale * whitebalance[2];
b_offset= (mtrx[2][3]) * scale;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = RG + bayer_pitch/4;
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 127);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
// g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO : need on convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
//g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO: Need to convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
line += pitch;
bayer_line += bayer_pitch;
}
}
*/
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
int precision = codec->precision;
if(decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width*info->height*num_channels*sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL);
}
//#ifdef SHARPENING
if(decoder->RGBFilterBuffer16 == NULL)
{
int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL);
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
frame_size = info->width*decoded_height*4*4*sizeof(PIXEL);
#if _ALLOCATOR
{
ALLOCATOR *allocator = decoder->allocator;
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, frame_size, 16);
}
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
decoder->RGBFilterBufferSize = frame_size;
}
//#endif
if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
//TODO: Replace this memory allocation with a scratch buffer allocation
if(decoder->RawBayer16)
{
uint8_t *outyuv,*line, *source_line;
PIXEL16U *bayerptr;
PIXEL16U *G,*RG,*BG;
int x,y;
int src_pitch = info->width*num_channels*sizeof(PIXEL);
int y_rmult,y_gmult,y_bmult,y_offset;//shift=8;
int u_rmult,u_gmult,u_bmult,u_offset;
int v_rmult,v_gmult,v_bmult,v_offset;
float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f/256.0f},
{-0.101f,-0.338f, 0.439f, 0.5f},
{0.439f,-0.399f,-0.040f, 0.5}
};
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, src_pitch,
info, chroma_offset, precision);
#else
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, src_pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
if (format == DECODED_FORMAT_YUYV)
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 256.0;
y_rmult = (int)((rgb2yuv[0][0]));
y_gmult = (int)((rgb2yuv[0][1]));
y_bmult = (int)((rgb2yuv[0][2]));
y_offset= (int)((rgb2yuv[0][3]));
u_rmult = (int)((rgb2yuv[1][0]));
u_gmult = (int)((rgb2yuv[1][1]));
u_bmult = (int)((rgb2yuv[1][2]));
u_offset= (int)((rgb2yuv[1][3]));
v_rmult = (int)((rgb2yuv[2][0]));
v_gmult = (int)((rgb2yuv[2][1]));
v_bmult = (int)((rgb2yuv[2][2]));
v_offset= (int)((rgb2yuv[2][3]));
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
source_line += src_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 16384.0;
y_rmult = (int)((rgb2yuv[0][0]) * scale);
y_gmult = (int)((rgb2yuv[0][1]) * scale);
y_bmult = (int)((rgb2yuv[0][2]) * scale);
y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f);
u_rmult = (int)((rgb2yuv[1][0]) * scale);
u_gmult = (int)((rgb2yuv[1][1]) * scale);
u_bmult = (int)((rgb2yuv[1][2]) * scale);
u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f);
v_rmult = (int)((rgb2yuv[2][0]) * scale);
v_gmult = (int)((rgb2yuv[2][1]) * scale);
v_bmult = (int)((rgb2yuv[2][2]) * scale);
v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f);
scale = 4096.0;
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
r1 = r;
g1 = g;
b1 = b;
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
source_line += src_pitch;
}
}
else //RGBs
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
unsigned short *rgb16 = (unsigned short *)line;
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch/(2*num_channels);
BG = RG + src_pitch/(2*num_channels);
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 255);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else if(info->format == DECODED_FORMAT_RGB24)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++<<1) - (128<<9)) + G1;
B1 = ((*BG++<<1) - (128<<9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
else if(info->format == DECODED_FORMAT_RG48)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
G1 = (*G++);
R1 = (*RG++);
B1 = (*BG++);
*rgb16++ = R1;
*rgb16++ = G1;
*rgb16++ = B1;
}
}
line += pitch;
source_line += src_pitch;
}
}
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else // Output the frame in one of the RGB 8-bit formats
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
// Invert the bottom wavelet and convert the output to the requested color format
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
#else
TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
}
}
#if TIMING
// Count the number of progressive frames that were decoded
progressive_decode_count++;
#endif
}
STOP(tk_inverse);
#ifdef ADOBE_MEMORY_FUNCTIONS
if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) ||
(decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2))
{
#if _ALLOCATOR
if(decoder->RawBayer16)
{
FreeAligned(decoder->allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#else
if(decoder->RawBayer16)
{
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#endif
}
#endif
#if (0 && DEBUG)
if (logfile) {
//uint8_t *subimage = output;
uint8_t *subimage = output + (2 * info->width) - 16;
DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile);
}
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Exit ReconstructFrameToBuffer\n");
}
#endif
#if (0 && DEBUG && _WINDOWS)
_CrtCheckMemory();
#endif
}
#if 0
/*
 * DISABLED (compiled out by the surrounding #if 0): older version of
 * ReconstructQuarterFrame, kept for reference only.
 *
 * NOTE(review): if re-enabled, this version would not compile as-is:
 *  - it initializes output_row_ptr from `output`, but its parameters are
 *    `frame1`/`frame2` (no `output` in scope);
 *  - it writes `channel_row_ptr[channel]`, which is never declared here.
 * Confirm against the active version below before resurrecting.
 */
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
uint8_t *frame1, uint8_t *frame2, int output_pitch,
FRAME_INFO *info, char *buffer, size_t buffer_size)
{
TRANSFORM **transform_array = decoder->transform;
int output_width = info->width;
int output_height = info->height;
// Per-channel cursors into the lowpass bands of the temporal wavelets
PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS];
// Scratch buffer is carved up into one intermediate row per channel
PIXEL *bufptr = (PIXEL *)buffer;
uint8_t *output_row_ptr = output;
// Row pitches in PIXEL units (converted from byte pitches below)
int low_pitch[CODEC_MAX_CHANNELS];
int high_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Check that there is enough space for the intermediate results from each channel
assert(output_width * sizeof(PIXEL) < buffer_size);
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *low_wavelet = transform_array[channel]->wavelet[3];
IMAGE *high_wavelet = transform_array[channel]->wavelet[2];
// Get the pointers to the first row in each lowpass band
low_row_ptr[channel] = low_wavelet->band[0];
high_row_ptr[channel] = high_wavelet->band[0];
low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
// Allocate space for one row of results for this channel
channel_row_ptr[channel] = bufptr;
bufptr += low_wavelet->width;
}
for (row = 0; row < output_height; row++)
{
char *bufptr = buffer;
for (channel = 0; channel < num_channels; channel++)
{
// Invert the temporal transform at quarter resolution
InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]);
// Advance to the next row in each band for the temporal transform
low_row_ptr[channel] += low_pitch[channel];
high_row_ptr[channel] += high_pitch[channel];
}
// Pack the intermediate results into the output row
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width);
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#else
/*
 * Reconstruct one frame at quarter resolution while preserving the full
 * frame rate.
 *
 * For each output row, the inverse temporal transform is applied directly
 * to the lowpass bands of two wavelets (indices 4 and 3) to produce one
 * row of intermediate 16-bit results per channel; each row is then
 * converted to the requested output format.
 *
 * Parameters:
 *   decoder      - decoder instance (transforms, scratch state, flags)
 *   num_channels - number of channels to reconstruct
 *   frame_index  - 0 selects the even temporal row, 1 the odd row
 *   output       - destination frame buffer
 *   output_pitch - destination row pitch in bytes (negated for bottom-up RGB)
 *   info         - output dimensions and color format
 *   scratch      - scratch space used for per-channel intermediate rows
 *   precision    - coefficient precision passed to the inverse transform
 */
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
TRANSFORM **transform_array = decoder->transform;
int output_width = info->width;
int output_height = info->height;
// Per-channel cursors into the lowpass bands of the two temporal wavelets
PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
// Row pitches in PIXEL units (converted from byte pitches below)
int low_pitch[CODEC_MAX_CHANNELS];
int high_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// The pixels are descaled in the inverse temporal transform
//const int descale = 0;
// Shift the intermediate results to 16-bit pixels
const int shift_yu64 = 8;
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Initialize a pointer for allocating space in the buffer
PIXEL *bufptr = (PIXEL *)buffer;
// Array of pointers to the start of each channel in the intermediate results
PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS];
// Check that there is enough space for the intermediate results from each channel
#if DEBUG
assert(output_width * sizeof(PIXEL) < buffer_size);
#endif
ComputeCube(decoder);
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *low_wavelet = transform_array[channel]->wavelet[4];
IMAGE *high_wavelet = transform_array[channel]->wavelet[3];
// Get the pointers to the first row in each lowpass band
low_row_ptr[channel] = low_wavelet->band[0];
high_row_ptr[channel] = high_wavelet->band[0];
low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
// Force the row of intermediate results to be properly aligned
// (required for the aligned SSE2 loads/stores below)
bufptr = (PIXEL *)ALIGN16(bufptr);
// Allocate space for one row of results for this channel
channel_row_ptr[channel] = bufptr;
bufptr += low_wavelet->width;
// Check that the row of intermediate results is properly aligned
assert(ISALIGNED16(channel_row_ptr[channel]));
}
// Invert the image if required
// (RGB24/RGB32 output is stored bottom-up: start at the last row and
// walk backwards with a negated pitch; no break needed, last case exits)
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
//HACK: Seems to work, I don't know why. //DAN20070304
if (precision == 12) precision = 8;
// Apply the inverse temporal transform to the lowpass and highpass rows
for (row = 0; row < output_height; row++)
{
// Most of the color conversion routines use zero descaling
int descale = 0;
//char *bufptr = buffer;
for (channel = 0; channel < num_channels; channel++)
{
if (frame_index == 0)
{
// Invert the temporal transform at quarter resolution to get the even row
InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
else
{
assert(frame_index == 1);
// Invert the temporal transform at quarter resolution to get the odd row
InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
// Advance to the next row in each band for the temporal transform
low_row_ptr[channel] += low_pitch[channel];
high_row_ptr[channel] += high_pitch[channel];
}
if(decoder->use_active_metadata_decoder)
{
// Active-metadata path: hand one row at a time to the AM row converter
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int i;
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
info2.height = 1; // process a single row per call
for(i=0;i<num_channels;i++)
{
channeldata[i] = (uint8_t *)channel_row_ptr[i];
channelpitch[i] = 0;
}
#if 1
// Saturating add/subtract clamps each 16-bit coefficient to the 12-bit
// range, then the left shift by 4 scales it to a 16-bit pixel
// (same effect as the non-SSE2 fallback below: clamp to [0,4095], <<4).
// Luma is full width; the two chroma rows are half width.
{
__m128i *Y = (__m128i *)channeldata[0];
__m128i *U = (__m128i *)channeldata[1];
__m128i *V = (__m128i *)channeldata[2];
__m128i v;
int x;
__m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff);
for(x=0;x<info->width;x+=8)
{
v = _mm_load_si128(Y);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(Y++, v);
}
for(x=0;x<info->width/2;x+=8)
{
v = _mm_load_si128(U);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(U++, v);
}
for(x=0;x<info->width/2;x+=8)
{
v = _mm_load_si128(V);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(V++, v);
}
}
#else
//non SSE2
for(x=0;x<info->width*2;x++)
{
int val = *gptr++;
if(val < 0) val = 0;
if(val > 4095) val = 4095;
val <<= 4;
*src++ = val;
}
src = scanline2;
#endif
Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch,
decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch);
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert the rows of luma and chroma into the output format
switch(format)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
// Pack the intermediate results into the output row
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
assert(0);//need quarter res BAYER To YUV decoder
}
else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
// NOTE: channel order here is G,R,B (index 1 first) -- matches the
// other RGB conversions below
ConvertRGB2YUV( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, info->colorspace, format);
}
else
{
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format);
}
break;
case COLOR_FORMAT_RGB24:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL,
output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0, 3/*only 3 channels, not 4 for alpha*/);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width,
shift_yu64, precision, format);
break;
case COLOR_FORMAT_B64A:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, COLOR_FORMAT_B64A, color_space);
}
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format);
}
#endif
assert(0);
break;
}
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#endif
#if 0
/*
 * DISABLED (compiled out by the surrounding #if 0): copy the quarter
 * resolution lowpass channels (wavelet index 1) from the spatial transform
 * directly into the output buffer, one packed row at a time.
 *
 * NOTE(review): this calls CopyQuarterRowToBuffer without the trailing
 * `format` argument used by the active code path in
 * ConvertQuarterFrameToBuffer below -- confirm the current prototype
 * before re-enabling.
 */
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
// Per-channel cursors into the lowpass band of each channel's wavelet
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
// Row pitches in PIXEL units (converted from byte pitches below)
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *wavelet = transform_array[channel]->wavelet[1];
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
for (row = 0; row < output_height; row++)
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision);
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++) {
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
#endif
/*
 * Convert the quarter resolution lowpass channels to the specified output
 * format.
 *
 * Reads the lowpass band of wavelet index 1 for each channel and converts
 * it row by row into the caller's output buffer.  Certain YUV output
 * formats of RGB-encoded sources cannot be converted directly and are
 * routed through the active-metadata decoder (threaded) path instead.
 *
 * Parameters:
 *   decoder         - decoder instance (thread pool, AM decoder flags)
 *   transform_array - per-channel wavelet transforms (band 0 of wavelet 1
 *                     must already be reconstructed and valid)
 *   num_channels    - number of channels to convert
 *   output          - destination frame buffer
 *   output_pitch    - destination row pitch in bytes (negated for
 *                     bottom-up RGB output)
 *   info            - output dimensions and color format
 *   precision       - coefficient precision for the row converters
 */
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
// Per-channel cursors into the lowpass band of each channel's wavelet
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
// Row pitches in PIXEL units (converted from byte pitches below)
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the wavelets with quarter resolution
const int wavelet_index = 1;
IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index];
// The wavelet should have been reconstructed
assert(wavelet != NULL);
// The lowpass band should be valid
assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
// Invert the image if required
// (RGB24/RGB32 output is stored bottom-up: start at the last row and
// walk backwards with a negated pitch; no break needed, last case exits)
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
ComputeCube(decoder);
//HACK DAN20110122 -- some formats will not directly decode so need to use the AM route
{
if( format == COLOR_FORMAT_YU64 ||
format == COLOR_FORMAT_V210 ||
format == COLOR_FORMAT_R408 ||
format == COLOR_FORMAT_V408)
{
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
}
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
// Hand the whole frame to the worker thread pool as one output job
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
// Lazily create the worker pool the first time it is needed
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_row_ptr;
mailbox->pitch = output_pitch;
mailbox->framenum = 0;
for(channel = 0; channel < num_channels; channel++)
{
mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel];
mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL);
}
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert each row to the specified output format
for (row = 0; row < output_height; row++)
{
// Right shift for converting lowpass coefficients to pixels
int descale = 4;
// Mask off the high bit of the format code before dispatching
switch(format & 0x7fffffff)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
// NOTE: channel order here is G,R,B (index 1 first)
ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, info->colorspace, format);
}
else
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width,
precision, format);
}
break;
case COLOR_FORMAT_RGB24:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
case COLOR_FORMAT_RGB32_INVERTED:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// NOTE(review): input_row_ptr[3] is only initialized when
// num_channels >= 4; presumably the callee ignores the alpha row
// pointer when num_channels is 3 -- confirm against its definition
ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3],
output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0, num_channels);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
//TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122
//
}
else
{
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format);
}
break;
case COLOR_FORMAT_B64A:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
assert(0);
break;
}
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++) {
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
}
// Release all resources allocated by the decoder.
// The transform array and count are part of the public signature but are not
// used here; all decoder-owned state is torn down by ClearDecoder.
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
	// Timing report (currently disabled by the "&& 0" in the guard)
	FILE *logfile = decoder->logfile;
	uint32_t frame_count = decoder->frame_count;
	// Fixed: removed a stray line-continuation backslash that used to trail
	// this condition and splice it onto the opening brace
	if (logfile != NULL && frame_count > 0)
	{
#ifdef _WINDOWS
		PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
		PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
	}
#endif
	// Free the data structures allocated for decoding
	ClearDecoder(decoder);
}
// Force the decoder to reload its metadata on the next decode.
// The request is propagated to the parallel decoder when one exists.
void DecodeForceMetadataRefresh(DECODER *decoder)
{
	// Mark the primary decoder
	decoder->cfhddata.force_metadata_refresh = true;

	// Mark the companion decoder used for parallel decoding (if any)
	if (decoder->parallelDecoder) {
		decoder->parallelDecoder->cfhddata.force_metadata_refresh = true;
	}
}
// Store the decoder processing flags.
// Fixed: the logfile declaration was guarded by "#if (1 && DEBUG)" while its
// only use was guarded by "#if (0 && DEBUG)", producing an unused-variable
// warning in debug builds; the guards now match. Also fixed the format
// specifier: flags is a uint32_t, so "%p" (which expects a pointer) was
// undefined behavior — use "%08x" instead.
void SetDecoderFlags(DECODER *decoder, uint32_t flags)
{
#if (0 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// Set the decoder flags
	decoder->flags = flags;
#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoder flags: 0x%08x\n", decoder->flags);
	}
#endif
}
// Set the output dimensions, pixel format, and decoded resolution.
// The WP13 and W13A branches in the original were byte-for-byte identical,
// so they are merged here; both 13-bit white point formats share the same
// configuration while all other formats use a 16-bit white point.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
	// Need to modify the codec to use the decoding format
	decoder->frame.width = width;
	decoder->frame.height = height;

	// The requested format is used both as the output and working format
	//TODO: Why is this needed with W13A work natively (see original notes)
	decoder->frame.output_format = format;
	decoder->frame.format = format;

	if (format == DECODED_FORMAT_WP13 || format == DECODED_FORMAT_W13A)
	{
		//decoder->frame.signed_pixels = 1;
		decoder->frame.white_point = 13;
	}
	else
	{
		//decoder->frame.signed_pixels = 0;
		decoder->frame.white_point = 16;
	}

	decoder->frame.resolution = resolution;
	decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}
// Determine the CPU feature flags and processor count for this decoder.
// The processor count (capped by an explicit limit or the affinity mask)
// is packed into the upper 16 bits of the capabilities word.
// Fixed: the affinity test used "1<<i" with i up to 31, and left-shifting
// a signed 1 into the sign bit is undefined behavior (CERT INT34-C);
// the shift is now performed on an unsigned constant.
void SetDecoderCapabilities(DECODER *decoder)
{
	int processor_count;
#ifdef _WINDOWS
	int limit_cpus = 32;
#else
	int limit_cpus = 32;	// AJA spins off too many
#endif
	// Set the capabilities that are most likely supported by the Intel Mac
	decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);

	if (decoder->thread_cntrl.limit)
	{
		// An explicit thread limit overrides the default cap
		limit_cpus = decoder->thread_cntrl.limit;
	}
	else if (decoder->thread_cntrl.affinity)
	{
		// Count the processors enabled in the affinity mask
		int i;
		const int max_cpu_count = 32;
		limit_cpus = 0;
		for (i = 0; i < max_cpu_count; i++)
		{
			if (decoder->thread_cntrl.affinity & (1U << i)) {
				limit_cpus++;
			}
		}
	}

	// Set the number of processors
	processor_count = GetProcessorCount();
	if (processor_count > limit_cpus)
		processor_count = limit_cpus;
#if (0 && DEBUG)
	// Set the number of processors (for debugging)
	//processor_count = 8;
	processor_count = 1;
	fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif
	// Pack the processor count into the upper 16 bits of the capabilities word
	decoder->thread_cntrl.capabilities |= (processor_count << 16);
}
// Return the packed capabilities word: CPU feature flags in the low bits and
// the processor count in the upper 16 bits (as packed by SetDecoderCapabilities).
// NOTE(review): no NULL check — callers must pass a valid decoder.
int GetDecoderCapabilities(DECODER *decoder)
{
return decoder->thread_cntrl.capabilities;
}
// Set the color space flags used for decoding.
// Returns true when the flags are within the supported range, false otherwise
// (in which case the decoder is left unchanged).
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
	// Reject values above the supported range (the lower bound check was
	// intentionally disabled in the original: /*MIN_DECODED_COLOR_SPACE <=*/)
	if (color_flags > MAX_DECODED_COLOR_SPACE) {
		return false;
	}

	decoder->frame.colorspace = color_flags;
	return true;
}
// Compute the resolution corresponding to the specified combination of
// input and output dimensions. Returns one of the DECODED_RESOLUTION_*
// codes, or DECODED_RESOLUTION_UNSUPPORTED when no scale matches exactly.
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
	int w = input_width;
	int h = input_height;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

	// Full resolution?
	if (output_width == w && output_height == h) {
		return DECODED_RESOLUTION_FULL;
	}

	// Half resolution? (dimensions halved once)
	w /= 2;
	h /= 2;
	if (output_width == w && output_height == h) {
		return DECODED_RESOLUTION_HALF;
	}

	// Quarter resolution? (dimensions halved twice)
	w /= 2;
	h /= 2;
	if (output_width == w && output_height == h) {
		return DECODED_RESOLUTION_QUARTER;
	}

	return DECODED_RESOLUTION_UNSUPPORTED;
}
// Compute the decoded resolution that is closest to the output dimensions,
// always decoding to the next larger (or equal) frame size.
// Cleanup: the original contained a large "#else" alternative guarded by
// "#if 1" that could never compile; the dead branch has been removed and
// behavior is unchanged.
int DecodedScale(int input_width, int input_height, int output_width, int output_height)
{
	int decoded_width = input_width;
	int decoded_height = input_height;
	// Map the number of halvings to the corresponding resolution code
	static const int decodedResolution[] =
	{
		DECODED_RESOLUTION_FULL,
		DECODED_RESOLUTION_HALF,
		DECODED_RESOLUTION_QUARTER
	};
	int reduction = 0;
	int max_reduction = 2;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

	// Always decode to the next larger size
	while (decoded_width > output_width &&
		   decoded_height > output_height &&
		   reduction < max_reduction)
	{
		// Decode to a frame size that is larger than the output image
		int reduced_width = decoded_width / 2;
		int reduced_height = decoded_height / 2;
		if (reduced_width >= output_width && reduced_height >= output_height)
		{
			decoded_width = reduced_width;
			decoded_height = reduced_height;
			reduction++;
		}
		else
		{
			// Halving again would undershoot the output size
			break;
		}
	}

	// Check that the decoded resolution is valid
	assert(0 <= reduction && reduction <= max_reduction);
	return decodedResolution[reduction];
}
// Compute the decoded frame dimensions for the specified resolution code.
// The results are written through the two output pointers.
void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution,
int *decoded_width_out, int *decoded_height_out)
{
switch (decoded_resolution)
{
default:
assert(0);
// NOTE(review): deliberate fall through — in release builds (assert compiled
// out) an unknown resolution code decodes at full size
case DECODED_RESOLUTION_FULL:
*decoded_width_out = encoded_width;
*decoded_height_out = encoded_height;
break;
case DECODED_RESOLUTION_HALF:
*decoded_width_out = encoded_width / 2;
*decoded_height_out = encoded_height / 2;
break;
case DECODED_RESOLUTION_QUARTER:
*decoded_width_out = encoded_width / 4;
*decoded_height_out = encoded_height / 4;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
//TODO: Check that the lowpass dimensions are correct
*decoded_width_out = encoded_width / 8;
*decoded_height_out = encoded_height / 8;
break;
}
}
// Return true if the specified resolution code is one this decoder supports
// (full, half, or quarter resolution).
bool IsDecodedResolution(int resolution)
{
	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF:
	case DECODED_RESOLUTION_QUARTER:
		return true;
	default:
		return false;
	}
}
// Return true if the encoded sample is a key frame.
// Scans up to the first twenty tag-value segments for the sample-type tag
// and classifies the sample type.
// Fixed: the loop decremented the size_t byte count by sizeof(TAGVALUE)
// while only testing "size > 0", so a sample whose size is not a multiple
// of the tag size wrapped the counter to a huge value and allowed reads
// past the end of the buffer; the condition now requires a whole segment
// to remain before each read.
bool IsSampleKeyFrame(uint8_t *sample, size_t size)
{
	bool key_frame_flag = false;
	// Search the first twenty tags for the sample type
	const int num_tags = 20;
	int i;
	BITSTREAM bitstream;
	InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ);
	for (i = 0; i < num_tags && size >= sizeof(TAGVALUE); i++, size -= sizeof(TAGVALUE))
	{
		TAGVALUE segment = GetSegment(&bitstream);
		if (segment.tuple.tag == CODEC_TAG_SAMPLE)
		{
			switch (segment.tuple.value)
			{
			case SAMPLE_TYPE_GROUP:
			case SAMPLE_TYPE_FIRST:
			case SAMPLE_TYPE_IFRAME:
				// These sample types start an independently decodable frame
				key_frame_flag = true;
				break;
			case SAMPLE_TYPE_SEQUENCE_HEADER:
			case SAMPLE_TYPE_FRAME:
			case SAMPLE_TYPE_SECOND:
			case SAMPLE_TYPE_PFRAME:
			default:
				key_frame_flag = false;
				break;
			case SAMPLE_TYPE_GROUP_TRAILER:
			case SAMPLE_TYPE_NONE:
			case SAMPLE_TYPE_ERROR:
			case SAMPLE_TYPE_CHANNEL:
				assert(0);				// Unexpected situation
				key_frame_flag = false;	// Report the sample as a non-key frame
				break;
			}
			break;	// Found the sample type
		}
	}
	return key_frame_flag;
}
// Return the number of the most recently decoded frame, or 0 when the
// decoder pointer is NULL.
// Fixed: the original took &decoder->codec BEFORE the NULL check, so the
// guard could never protect the dereference; the check now comes first.
uint32_t DecodedFrameNumber(DECODER *decoder)
{
	if (decoder == NULL) return 0;
	return decoder->codec.frame_number;
}
/***** Start of the new code for the finite state machine (FSM) decoder *****/
#if _PROCESSOR_DISPATCH
// Dispatch stub: __declspec(cpu_dispatch) selects one of the cpu_specific
// implementations below at run time (Intel compiler feature).
// NOTE(review): body is intentionally empty — the compiler generates the
// dispatch code.
__declspec(cpu_dispatch(Pentium_4,Generic))
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
// Stub routine for processor specific dispatch
}
#endif
#if _PROCESSOR_GENERIC
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Generic))
#endif
// Zero one row of highpass coefficients using 8-byte MMX stores.
// This version assumes that the row is a multiple of 8 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
int count;
// Check that the row starts on a 16-byte boundary
//assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 8 byte blocks
assert(ISALIGNED(length, 8));
// Convert the length from bytes to 8-byte blocks
count = (length >> 3);
// This code assumes that at least one 8-byte block will be zeroed
assert(count > 0);
// 32-bit MSVC inline assembly (not portable to x64 or GCC builds)
__asm
{
pxor mm0, mm0 // Zero an 8-byte MMX register
mov eax, rowptr // Load the pointer to the memory block
mov ebx, count // Load the count of 8-byte blocks
loop: movq [eax], mm0 // Write 8 bytes of zeros
add eax, 8 // Advance to the next 8 byte block
sub ebx, 1 // Decrement the number of blocks
jg loop
}
// NOTE(review): _mm_empty() (EMMS) is commented out — MMX state may be left
// dirty for subsequent x87 code; confirm the caller issues EMMS
//_mm_empty();
}
#endif
#if _PROCESSOR_PENTIUM_4
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Pentium_4))
#endif
#ifndef _WIN64
// 32-bit build: zero one row of highpass coefficients.
// The SSE2 assembly below has been superseded by memset (see "#if 1").
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
int count;
// Check that the row starts on a 16-byte boundary
assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 16 byte blocks
assert(ISALIGNED(length, 16));
// Convert the length from bytes to 16-byte blocks
count = (length >> 4);
// This code assumes that at least one 16-byte block will be zeroed
assert(count > 0);
#if 1 //DANREMOVE
memset(rowptr, 0, length);
#else
__asm
{
pxor xmm0, xmm0 // Zero a 16 byte register
mov eax, rowptr // Load the pointer to the memory block
mov ebx, count // Load the count of 16-byte blocks
loop: movdqa [eax], xmm0 // Write 16 bytes of zeros
add eax, 16 // Advance to the next 16 byte block
sub ebx, 1 // Decrement the number of blocks
jg loop
}
#endif
}
#else
// 64-bit build: inline assembly is unavailable, so use memset directly.
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
// Check that the row starts on a 16-byte boundary
assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 16 byte blocks
assert(ISALIGNED(length, 16));
memset(rowptr, 0, length);
}
#endif
#endif
#if (0 && _DEBUG)
// Functions for the finite state machine decoder (debug version)
// NOTE(review): this debug variant is compiled out ("0 &&") and references
// fsm->entries, while the live macros below use fsm->table.entries — the
// two would need reconciling before re-enabling this branch.
static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index)
{
// Return the address of the next table entry in the finite state machine
return &fsm->next_state[index];
}
static void ResetFSM(FSM *fsm)
{
// Reset the state to the beginning of the finite state machine entries
fsm->next_state = fsm->entries;
}
static void UpdateFSM(FSM *fsm, int next)
{
// Change the state pointer to the next block of table entries
fsm->next_state = fsm->entries + (next << FSM_INDEX_SIZE);
}
#else
// Macros for the finite state machine decoder
#if _INDIVIDUAL_LUT
// Per-state lookup tables: the state is tracked as an index into entries_ind
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0]
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next]
#define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index]
#define ResetFSMIndividual(fsm) fsm->next_state_index = 0
#define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next
#else
// Single flat table: the state is tracked as a pointer into the entry array
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE)
#endif
#endif
#if _DEBUG
// Debug helpers for inspecting finite state machine entries.
// NOTE(review): the DebugOutput* functions compute locals that are never
// used — presumably the output statements were removed; the values remain
// visible in a debugger.
static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = entry->value0 / 32;
int value1 = entry->value1 / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
}
static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = (entry->values >> 16) / 32;
int value1 = (entry->values & 0xFFFF) / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
}
static void DebugOutputFSM(FSM *fsm)
{
int num_entries = FSM_INDEX_ENTRIES;
int i;
for (i = 0; i < num_entries; i++)
{
FSMENTRY *entry = &fsm->table.entries[0][i];
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
}
}
// Print one FSM entry as a CSV line: index, value0, value1, pre_skip, post_skip
static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = entry->value0 / 32;
int value1 = entry->value1 / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
if (logfile) {
fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
}
}
// Same as PrintFSMEntry for the packed FSMENTRYFAST layout
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
int pre_skip = (entry->pre_post_skip & 0xFFF);
int post_skip = (entry->pre_post_skip >> 12);
// Remove companding
int value0 = (entry->values >> 16) / 32;
int value1 = (entry->values & 0xFFFF) / 32;
// Convert the index to start at the beginning of the table
index += (int)(fsm->next_state - fsm->table.entries[0]);
if (logfile) {
fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
}
}
#endif
// Fast path for reading one byte from the bitstream (inline of the third
// case of GetByte): fetch the byte at the current position and advance.
static inline int GetFastByte(BITSTREAM *stream)
{
	// Read the byte at the current position
	int byte = (uint32_t)(*stream->lpCurrentWord);

	// Advance the stream position past the byte just read
	stream->lpCurrentWord++;
#if ERROR_TOLERANT
	// Track the number of bytes remaining in the stream
	stream->nWordsUsed--;
#endif
	// Check that the high bits are zero
	assert((byte & ~BITMASK(8)) == 0);

	return byte;
}
#if 0
// Disabled: big-endian 16-bit read from the bitstream (kept for reference).
static inline int GetFastShort(BITSTREAM *stream)
{
// Adaptation of the code in GetByte
uint8_t *lpCurrentWord = stream->lpCurrentWord;
// Get the next byte from the bitstream
int byte = (uint32_t )(lpCurrentWord[0]);
int word = (byte << 8) | (uint32_t )(lpCurrentWord[1]);
// Update the state of the bitstream
stream->lpCurrentWord = lpCurrentWord+2;
// Check that the high bits are zero
assert((word & ~BITMASK(16)) == 0);
return word;
}
#endif
// Must declare the byte swap function even though it is an intrinsic
//int _bswap(int);
#if 0
// Disabled: byte-swapped 32-bit read from the bitstream (kept for reference).
static inline int GetFastLong(BITSTREAM *stream)
{
uint32_t *lpCurrentWord = (uint32_t *)stream->lpCurrentWord;
int word = *(lpCurrentWord)++;
//word = _bswap(word);
word = SwapInt32BtoN(word);
stream->lpCurrentWord = (uint8_t *)lpCurrentWord;
return word;
}
#endif
#if 0 //DAN20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
// NOTE(review): this entire function is compiled out (#if 0) and retained
// for reference only. The two half-byte chunks are handled by duplicated
// code; the second half mirrors the first.
bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
int index, byte;
FSMENTRY *entry;
PIXEL *rowptr = image;
int column = 0;
int32_t value;
size_t bytes_row_size = width * sizeof(PIXEL); // NOTE(review): unused local
PIXEL *maxptr;
int length = width * sizeof(PIXEL);
//ROI roi = {width, 1};
// This version of Huffman decoder assumes that one byte
// is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL);
// Compute the address of the row after the last row in the band
maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
length = ALIGN16(length);
#if (0 && DEBUG)
zerorow_count = 0;
#endif
ZeroHighPassRow(rowptr, length);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER) {
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow(rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0) {
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
}
}
// If there is only one decoded magnitude value
else if(entry->value1 == 0) {
// Undo quantization and scaling
value = quantization * entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry->value1;
rowptr[column++] = SATURATE(value);
}
else {
// The pair straddles a row boundary: store the first value at the end
// of this row and the second at the start of the next row
value = quantization * entry->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER) {
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow(rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0) {
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0) {
// Undo quantization and scaling
int32_t value = quantization * entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, length);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
}
}
#endif
// Decode a subband of highpass coefficients using a finite state machine.
// One byte is read from the bitstream each time and decoded in two steps.
// New version that uses a buffer aligned to the cache for decoding.
#if 0
// Disabled: zero a block of whole cache lines (kept for reference).
static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines)
{
// This routine assumes that the cache line size is 64 bytes
assert(_CACHE_LINE_SIZE == 64);
// This routine assumes that the input pointer is aligned to a cache line
assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE));
// This routine assumes that at least one cache line will be written
assert(numCacheLines > 0);
#if __GNUC__
memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE);
#else
__asm
{
pxor xmm0, xmm0 // Zero a 16 byte register
mov eax, ptrCacheLines // Load the pointer to the memory block
mov ebx, numCacheLines // Load the count of the number of cache lines
loop: movdqa [eax], xmm0 // Write 64 bytes of zeros using aligned stores
movdqa [eax+16], xmm0
movdqa [eax+32], xmm0
movdqa [eax+48], xmm0
add eax, 64 // Advance to the next cache line
sub ebx, 1 // Decrement the number of cache lines
jg loop
}
#endif
// The routine returns the pointer to the cache line after zeroing the block
}
#endif
#if 0
// Disabled: copy a decoded row from the cache-aligned buffer to the output
// row using non-temporal 16-byte stores (kept for reference).
static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length)
{
// Note that the length is in units of bytes (not pixels)
int count; // Number of 16-byte blocks to copy
// Check that the row length is an integer multiple of 16-byte blocks
assert(ISALIGNED(length, 16));
// Convert the row length to the number of 16-byte blocks to copy
count = length >> 4;
// This routine assumes that at least one 16 byte block will be copied
assert(count > 0);
#if __GNUC__
// Use standard memory copy
memcpy(rowptr, buffer, length);
#else
// Copy a multiple of 16 byte blocks
__asm
{
mov eax, rowptr // Load the pointer to the destination
mov ebx, buffer // Load the pointer to the source
mov ecx, count // Load the number of 16-byte blocks to copy
loop: movdqa xmm0, [ebx] // Load 16 bytes from the source
movntdq [eax], xmm0 // Copy 16 bytes to the destination
add eax, 16 // Advance to the group of 16 bytes
add ebx, 16
sub ecx, 1 // Decrement the number of blocks to copy
jg loop
}
#endif
}
#endif
// DecodeBandFSMBuffered is no longer used
#if 0 //dan20041030 not used
bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image,
int width, int height, int pitch,
int quantization, char *decoding_buffer, size_t decoding_buffer_size)
{
char *rowptr = (char *)image; // Pointer to current row
char *maxptr = rowptr + height * pitch; // Address of row after the last row
FSMENTRY *entry;
int index;
int byte;
int column = 0;
int32_t value;
size_t row_size;
size_t cache_row_size; // Size of a row in bytes
int cache_line_count; // Size of the buffer in cache lines
PIXEL *buffer; // Pixel pointer to the buffer
int length; // Length of row in bytes
// Check that the processing size allows two chunks per byte
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
// The bitstream buffer should be empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Compute the number of cache lines used in the buffer
row_size = width * sizeof(PIXEL);
cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT);
// Check that the buffer is large enough
assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE));
// This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes
length = pitch;
assert(length == ALIGN(row_size, 16));
// Cast the buffer pointer for pixel access
buffer = (PIXEL *)decoding_buffer;
// Zero the decoding buffer
ZeroHighPassBuffer(buffer, cache_line_count);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER)
{
// Copy the buffer to the row if not already beyond the band
if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);
// Advance to the next row
rowptr += pitch;
// Zero the remaining rows in the subband
while (rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
// Reset the finite state machine to the root node in the Huffman tree
ResetFSM(fsm);
// Return indication that the band was fully decoded
return true;
}
// Set the finite state machine to the next state in the Huffman tree
UpdateFSM(fsm, entry->next_state);
// No magnitude values decoded?
if (entry->value0 == 0)
{
// No magnitudes decoded so just advance the column pointer
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// Only one magnitude value decoded?
else if (entry->value1 == 0)
{
// Process the magnitude value that was decoded
// Undo quantization and scaling
value = quantization * entry->value0;
// Advance to the column where the value should be placed
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
buffer[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
else // Two magnitude values were decoded
{
// Check the column before storing values
assert(0 <= column && column < width);
if (column < width - 1) {
// Dequantize and store the first value
value = quantization * entry->value0;
buffer[column++] = SATURATE(value);
// Dequantize and store the second value
value = quantization * entry->value1;
buffer[column++] = SATURATE(value);
}
else {
// Dequantize and store the first value in the current row
value = quantization * entry->value0;
buffer[column] = SATURATE(value);
// Dequantize the second value
value = quantization * entry->value1;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
// Reset the column to the beginning of the row
column = 0;
// Store the second value in the new row
buffer[column++] = SATURATE(value);
}
}
// Decode the second 4-bit chunk
index = byte & FSM_INDEX_MASK;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Copy the buffer to the row if not already beyond the band
if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);
// Advance to the next row
rowptr += pitch;
// Zero the remaining rows in the subband
while (rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
// Reset the finite state machine to the root node in the Huffman tree
ResetFSM(fsm);
// Return indication that the band was fully decoded
return true;
}
// Set the finite state machine to the next state in the Huffman tree
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0) {
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0) {
// Undo quantization and scaling
int32_t value = quantization * entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
buffer[column] = SATURATE(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if (column < width-1) {
value = quantization * entry->value0;
buffer[column++] = SATURATE(value);
value = quantization * entry->value1;
buffer[column++] = SATURATE(value);
}
else {
value = quantization * entry->value0;
buffer[column] = SATURATE(value);
value = quantization * entry->value1;
// Advance to the next row
assert(rowptr < maxptr);
CopyRowBuffer(rowptr, buffer, length);
rowptr += pitch;
// Zero the decoding buffer if there are more rows to process
if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
// Reset the column to the beginning of the row
column = 0;
buffer[column++] = SATURATE(value);
}
}
}
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM, combine the two results decoded from one byte
// NOTE(review): this function is compiled out by the surrounding #if 0 — retained for reference only.
//
// Decode one subband using the finite state machine, reading one byte (two 4-bit
// chunks) per iteration and combining both table lookups before writing output.
// Decoded magnitudes are written directly into 'image' (pitch in pixels after the
// caller's units); zero runs advance 'column' and wrap across rows. Returns true
// when a BAND_END_TRAILER code is decoded; the loop has no other exit.
bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
int index, skip;   // NOTE(review): 'skip' is declared but never used in this body
uint8_t byte;
FSMENTRY *entry1, *entry2;
PIXEL *rowptr = image;
int row = 0, column = 0;   // NOTE(review): 'row' is declared but never used in this body
int32_t value,bytes_row_size = width*sizeof(PIXEL);   // NOTE(review): 'bytes_row_size' is never used
PIXEL *maxptr = rowptr + height*pitch;
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
ZeroHighPassRow(rowptr, width);
// Double check that the bitstream buffer is empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
//byte = GetBits(stream, BITSTREAM_WORD_SIZE);
#if 0
byte = GetByte(stream);
if (stream->error != BITSTREAM_ERROR_OKAY) {
stream->error = VLC_ERROR_NOTFOUND;
return false;
}
#else
// Inline of the third case of GetByte
uint8_t *lpCurrentWord = stream->lpCurrentWord;
// Get the next byte from the bitstream
byte = (uint32_t )(*(lpCurrentWord++));
// Update the state of the bitstream
stream->lpCurrentWord = lpCurrentWord;
// Check that the high bits are zero
assert((byte & ~BITMASK(8)) == 0);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
entry1 = GetFSMTableEntry(fsm, index);
UpdateFSM(fsm, entry1->next_state);
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
entry2 = GetFSMTableEntry(fsm, index);
UpdateFSM(fsm, entry2->next_state);
// Return when the subband is completely decoded
if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER) {
ResetFSM(fsm);
return true;
}
// If no magnitude value is decoded at the first step
if (entry1->value0 == 0) {
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry1->pre_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else if(entry2->value1 == 0) {
// Skip to the non-zero position
column += entry1->pre_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Undo quantization and scaling
value = quantization * entry2->value0;
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If two magnitudes are decoded at the second step
else {
column += entry1->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry2->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry2->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry2->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry2->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
// NOTE(review): the store below (and the identical pattern later in this
// function) is not guarded by rowptr < maxptr — if the band ends exactly on
// this boundary it writes one pixel past the subband; confirm the encoder
// never emits a pair split across the final row boundary
rowptr[column++] = SATURATE(value);
}
}
}
// If only one magnitude is decoded at the first step
else if(entry1->value1 == 0) {
// Undo quantization and scaling
value = quantization * entry1->value0;
column += entry1->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry1->post_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else if (entry2->value1 == 0)
{
// Undo quantization and scaling
value = quantization * entry2->value0;
column += entry1->post_skip+entry2->pre_skip;
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If two magnitudes are decoded at the second step
else
{
column += entry1->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry2->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry2->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry2->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry2->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
}
}
// If two magnitudes are decoded at the first step
else {
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = quantization * entry1->value0;
rowptr[column++] = SATURATE(value);
value = quantization * entry1->value1;
rowptr[column++] = SATURATE(value);
}
else {
value = quantization * entry1->value0;
rowptr[column] = SATURATE(value);
value = quantization * entry1->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
column = 0;
rowptr[column++] = SATURATE(value);
}
// If two magnitudes are decoded at the first step
// then at most one more magnitude can be decoded at the second step
assert(entry2->value1 == 0);
// If no magnitude is decoded at the second step
if(entry2->value0 == 0) {
column += entry2->pre_skip; // entry2->pre_skip <=4 must be true
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
// If one magnitude is decoded at the second step
else {
column += entry2->pre_skip; // must be a small zero run
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
// Fill in the decoded magnitude
// Undo quantization and scaling
value = quantization * entry2->value0;
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE(value);
column += entry2->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
}
}
}
}
}
#endif
#if 0 //dan20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
// NOTE(review): this function is compiled out by the surrounding #if 0 — retained for reference only.
//
// Decode a subband into 8-bit signed pixels using the FSM. One byte is read per
// iteration and decoded as two independent 4-bit chunks, each of which may emit
// zero runs (advancing 'column' with row wrap-around) and up to two magnitudes.
// Unlike the quantizing variants, magnitudes are stored without scaling
// (SATURATE8S only). Returns true when a BAND_END_TRAILER code is decoded,
// after zeroing any remaining rows; the loop has no other exit.
bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
int column = 0;
int32_t value;
PIXEL8S *maxptr;
int length = width * sizeof(PIXEL8S);
//ROI roi = {width, 1};
// This version of Huffman decoder assumes that one byte
// is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
// Compute the address of the row after the last row in the band
maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
length = ALIGN16(length);
ZeroHighPassRow((PIXEL *)rowptr, length);
// Decode runs and magnitude values until the band end trailer is decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER) {
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if(entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
// NOTE(review): this store (and the identical pattern below for the second
// chunk) is not guarded by rowptr < maxptr — potential one-pixel write past
// the subband if the band ends exactly at this boundary; confirm the encoder
// never splits a magnitude pair across the final row boundary
rowptr[column++] = SATURATE8S(value);
}
}
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Zero out the whole subband from here on
rowptr += pitch;
while(rowptr < maxptr) {
ZeroHighPassRow((PIXEL *)rowptr, length);
rowptr += pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < width);
// Store the saturated value at the position found in the scan
rowptr[column] = SATURATE8S(value);
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= width)
{
// Compute the starting column for the next row
column -= width;
// Advance to the next row
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < width);
if(column < width-1) {
value = entry->value0;
rowptr[column++] = SATURATE8S(value);
value = entry->value1;
rowptr[column++] = SATURATE8S(value);
}
else {
value = entry->value0;
rowptr[column] = SATURATE8S(value);
value = entry->value1;
rowptr += pitch;
if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
column = 0;
rowptr[column++] = SATURATE8S(value);
}
}
}
}
#endif
// same as DecodeBandFSM8sNoGap but output to 16bit data
// Two-pass FSM decode of a subband into 16-bit pixels with no width/pitch gap
// (zero runs in the stream already cover the gap, so no per-row bounds checks
// are needed when storing). The first pass writes the decoded magnitudes
// directly; after the band trailer the bitstream is tag-aligned, advanced past
// a 4-byte marker, and a second pass ORs a second set of decoded values into
// the high byte of each coefficient (rowptr[i] |= value << 8).
// NOTE(review): parameters 'width' and 'quant' are never read in this body —
// presumably kept for signature parity with the other decoders; verify callers.
// NOTE(review): 'rowptr' is declared PIXEL* but assigned from PIXEL16S* —
// assumes PIXEL and PIXEL16S are the same 16-bit type; confirm the typedefs.
// Returns false only when 'image' is NULL; otherwise true.
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL *rowptr = (PIXEL *)image;
PIXEL16S *bandendptr;
int value;
#if ERROR_TOLERANT
// Remember the stream position so a corrupt band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#endif
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
if (image == NULL) {
return false;
}
// Reset the decoder
ResetFSM(fsm);
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs (pre-skip is packed in the low 12 bits)
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance (post-skip is packed in the high bits)
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
goto SecondPass;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
SecondPass:
// Restart at the top of the band; realign the stream and step past a 4-byte marker
rowptr = (PIXEL16S *)image;
AlignBits(stream);
AlignBitsTag(stream);
stream->lpCurrentWord += 4;
stream->nWordsUsed -= 4;
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Merge the first decoded magnitude into the high byte
value = entry->value0;
rowptr[0] |= value << 8;
// Merge the second decoded magnitude into the high byte
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Merge the first decoded magnitude into the high byte
value = entry->value0;
rowptr[0] |= value << 8;
// Merge the second decoded magnitude into the high byte
value = entry->value1;
rowptr[1] |= value << 8;
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Only reached if the loop exited without a band trailer (corrupt stream):
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// Same as DecodeBandFSM8sNoGap but output to 16bit data
// Decode a subband into 16-bit pixels with no width/pitch gap encoded as zero
// runs, so magnitudes can be stored without per-row end checks. Runs a fast
// loop (raw byte reads, combined 32-bit store of both values) until within 500
// pixels of the band end, then a careful loop that checks BAND_END_TRAILER and
// writes values individually. Returns true on the band trailer (or, under
// ERROR_TOLERANT, after rewinding and skipping a corrupt band).
// NOTE(review): the fast loop does not test for BAND_END_TRAILER — it relies on
// the 500-pixel margin to stop before the trailer can appear; confirm this
// margin against the maximum run length a single FSM entry can emit.
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch)
#endif
{
int index, byte;
FSMENTRY *entry;
FSMENTRYFAST *entryfast;
PIXEL16S *rowptr = image;
PIXEL16S *bandendptr;
PIXEL16S *fastendptr;
int32_t value;
// Remember the stream position so a corrupt band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
uint8_t *CurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;   // NOTE(review): unused unless ERROR_TOLERANT
ptrdiff_t offset;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
#if (0 && DEBUG)
DebugOutputBitstreamPosition(stream);
DebugOutputBitstreamBytes(stream, 16);
#endif
// Reset the decoder
ResetFSM(fsm);
#if (0 && DEBUG)
DebugOutputFSM(fsm);
#endif
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
//memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Fast loop stops 500 pixels short of the band end to leave a safety margin
fastendptr = bandendptr;
fastendptr -= 500;
// Decode runs and magnitude values until the entire band is decoded
while(rowptr < fastendptr)
{
// Read a byte from the bitstream (direct pointer read, no stream bookkeeping)
byte = *CurrentWord++;
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntryFast(fsm, index, entryfast);
PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
// Set the pointer to the next state
UpdateFSM(fsm, (int)entryfast->next_state);
// Skip the decoded zero runs (pre-skip is packed in the low 12 bits)
rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];
// Store both decoded magnitudes with a single 32-bit write
*((uint32_t *)rowptr) = entryfast->values;
// Skip the appropriate distance (post-skip is packed in the high bits)
rowptr = &rowptr[entryfast->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntryFast(fsm, index, entryfast);
PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif
// set the pointer to the next state
UpdateFSM(fsm, (int)entryfast->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];
// Store both decoded magnitudes with a single 32-bit write
*((uint32_t *)rowptr) = entryfast->values;
// Skip the decoded zero runs
rowptr = &rowptr[entryfast->pre_post_skip >> 12];
}
// Reconcile the stream state with the bytes consumed by the fast loop
offset = CurrentWord - startCurrentWord;
stream->lpCurrentWord += offset;
stream->nWordsUsed -= (int)offset;
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while(bandendptr >= rowptr)
#else
for (;;)
#endif
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
#if (0 && DEBUG)
PrintBitstreamPosition(stream, logfile);
#endif
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntry(fsm, index, entry);
PrintFSMEntry(fsm, index, entry, logfile);
#endif
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude (skip zero stores)
if ((value = entry->value0)) {
rowptr[0] = value;//SATURATE(value);
}
// Write down the second decoded magnitude
if ((value = entry->value1)) {
rowptr[1] = value;//SATURATE(value);
}
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if (0 && DEBUG)
//DebugOutputFSMEntry(fsm, index, entry);
PrintFSMEntry(fsm, index, entry, logfile);
#endif
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude
if ((value = entry->value0)) {
rowptr[0] = value;//SATURATE(value);
}
// Write down the second decoded magnitude
if ((value = entry->value1)) {
rowptr[1] = value;//SATURATE(value);
}
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Only reached if the loop exited without a band trailer (corrupt stream):
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// Decode a subband into 16-bit pixels (no width/pitch gap) with a "peaks"
// escape: whenever a decoded magnitude exceeds 'level', the coefficient is
// replaced by the next value from the 'peaks' table divided by 'quant'
// (the FSM here is not pre-quantized, so 'level' is rescaled by 'quant').
// Uses a fast loop until within 1000 pixels of the band end, then a careful
// loop that checks BAND_END_TRAILER. Returns true on the band trailer (or,
// under ERROR_TOLERANT, after rewinding and skipping a corrupt band).
// NOTE(review): the fast loop does not test for BAND_END_TRAILER — it relies on
// the 1000-pixel margin to stop before the trailer; also the peak escape is only
// applied to value0, not value1 — confirm the encoder never emits a peak as the
// second value of a pair.
bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant)
{
int index, byte;
FSMENTRY *entry;
PIXEL16S *rowptr = image;
PIXEL16S *bandendptr;
PIXEL16S *fastendptr;
int32_t value;
// Remember the stream position so a corrupt band can be skipped cleanly
uint8_t *startCurrentWord = stream->lpCurrentWord;
uint8_t *CurrentWord = stream->lpCurrentWord;
int32_t startWordsUsed = stream->nWordsUsed;
#if _FSMBUFFER
__declspec(align(32)) FSMENTRY buffer;
#endif
// Reset the decoder
ResetFSM(fsm);
//This is been called with non-prequantized FSM
if(quant>1) level /= quant;
pitch /= sizeof(PIXEL16S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
// This Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
#if 0 // test for errors.
{
if((rand() % 10) == 1)
stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif
// Fast loop stops 1000 pixels short of the band end to leave a safety margin
fastendptr = bandendptr;
fastendptr -= 1000;
// Decode runs and magnitude values until the entire band is decoded
while(rowptr < fastendptr)
{
// Read a byte from the bitstream (direct pointer read, no stream bookkeeping)
byte = *CurrentWord++;
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs (pre-skip is packed in the low 12 bits)
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude, substituting a peak value if large
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance (post-skip is packed in the high bits)
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude, substituting a peak value if large
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
// Reconcile the stream state with the bytes consumed by the fast loop
stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));
// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
for (;;)
#endif
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
// Read a byte from the bitstream
#if ERROR_TOLERANT
if(stream->nWordsUsed)
{
byte = GetFastByte(stream);
}
else
{
break;
}
#else
byte = GetFastByte(stream);
#endif
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude, substituting a peak value if large
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->pre_post_skip >> 12];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip & 0xfff];
// Write down the first decoded magnitude, substituting a peak value if large
value = entry->value0;
if(abs(value) > level)
rowptr[0] = *peaks++ / quant;
else
rowptr[0] = value;//SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = value;//SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_post_skip >> 12];
}
#if ERROR_TOLERANT
// Only reached if the loop exited without a band trailer (corrupt stream):
// Reset the decoder
ResetFSM(fsm);
// Backup the bitstream to the beginning of the band
stream->lpCurrentWord = startCurrentWord;
stream->nWordsUsed = startWordsUsed;
#if 0
AlignBitsTag(stream);
// Read the debugging marker
{
TAGVALUE segment;
do
{
segment = GetTagValue(stream);
}
while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
stream->lpCurrentWord -= 4;
stream->nWordsUsed += 4;
}
#else
SkipSubband(stream);
#endif
#endif
return true;
}
// This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as
// zero runs. Therefore decoded magnitude values can be written down without the need to check
// if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM
// can be significantly reduced.
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
#if !_INDIVIDUAL_ENTRY
#if 0 //dan20041030 not used
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
// Decode one subband of 8-bit signed coefficients using the finite state machine.
// The gap between width and pitch is assumed to have been encoded as zero runs,
// so magnitudes can be written without per-row end checks (see the comment block
// above this function). Returns true once the band end trailer is decoded.
// NOTE(review): the width parameter is unused in this variant; only height*pitch
// matters because the row gaps are covered by the encoded zero runs.
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
PIXEL8S *bandendptr;	// one past the last coefficient of the band; asserts only
int32_t value;
#if _FSMBUFFER
// Local aligned copy of the table entry (avoids repeated table reads)
__declspec(align(32)) FSMENTRY buffer;
#endif
// Convert the pitch from bytes to pixels
pitch /= sizeof(PIXEL8S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
// This version of Huffman decoder assumes that one byte
// is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
// Decode runs and magnitude values until the entire band is decoded
//while (rowptr < bandendptr)
for (;;)
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
// Check that the decoder has not overrun the output array
//assert(rowptr < bandendptr);
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
#if 1
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
#endif
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->post_skip];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
#if _FSMBUFFER
memcpy(&buffer, entry, sizeof(FSMENTRY));
entry = &buffer;
#endif
#if 1
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSM(fsm);
return true;
}
#endif
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->post_skip];
}
}
#endif
#elif _SINGLE_FSM_TABLE
// Single-FSM-table variant: all state entries live in one contiguous array, so
// the table entry is located by index arithmetic instead of a per-state pointer.
// Decodes one subband of 8-bit signed coefficients; the width/pitch gap is
// assumed to be coded as zero runs. Returns true at the band end trailer.
// NOTE(review): the width parameter is unused here, as in the other variants.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte, i;
FSMENTRY *entry,*firstentry = fsm->table->firstentry;
PIXEL8S *rowptr = image;
PIXEL8S *bandendptr;	// one past the last coefficient; used in the asserts below
int32_t value;
// Convert the pitch from bytes to pixels
pitch /= sizeof(PIXEL8S);
// Zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
// Fix: bandendptr was read by the asserts below without ever being initialized
// (undefined behavior in debug builds). Compute the end of the band the same
// way the other variants of this routine do.
bandendptr = rowptr + height * pitch;
// The Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Decode runs and magnitude values until the entire band is decoded
for (;;)
{
// Check that the decoder has not overrun the output array
//assert(rowptr < bandendptr);
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
entry = firstentry+i; //DAN
// Return if the subband is decoded completely
if(entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->post_skip];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN
entry = firstentry+i; //DAN
// Return if the subband is decoded completely
if(entry->value0 == BAND_END_TRAILER)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->post_skip];
}
}
#else
// Individual-entry variant: each FSM state has its own entry object and the
// band end is signalled by a NULL entry rather than a trailer value.
// Decodes one subband of 8-bit signed coefficients; the width/pitch gap is
// assumed to be coded as zero runs. Returns true when the band end is reached.
// NOTE(review): the width parameter is unused here, as in the other variants.
bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int index, byte;
FSMENTRY *entry;
PIXEL8S *rowptr = image;
PIXEL8S *bandendptr;	// one past the last coefficient; used in the asserts below
int32_t value;
// Fix: removed the unused MSVC-specific "__declspec(align(4)) FSMENTRY buffer"
// declaration; this variant never copies entries into a local buffer.
// Convert the pitch from bytes to pixels
pitch /= sizeof(PIXEL8S);
// zero out the entire subband
ZeroHighPassRow((PIXEL *)rowptr, pitch*height);
// The Huffman decoder assumes each byte is processed as two 4-bit chunks
assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
bandendptr = rowptr + height * pitch;
// Decode runs and magnitude values until the entire band is decoded
for (;;)
{
#if (0 && DEBUG)
if (!(rowptr < bandendptr)) {
return true;
}
#endif
// Read a byte from the bitstream
byte = GetFastByte(stream);
// Decode the first 4-bit chunk
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntryIndividual(fsm, index);
// Return if the subband is decoded completely (NULL entry marks the band end)
if(entry == NULL)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the appropriate distance
rowptr = &rowptr[entry->post_skip];
// decode the second 4-bit chunk
index = byte & ((1<<FSM_INDEX_SIZE)-1);
// Index into the lookup table at that state
entry = GetFSMTableEntryIndividual(fsm, index);
// Return if the subband is decoded completely (NULL entry marks the band end)
if (entry == NULL)
{
assert(rowptr <= bandendptr);
ResetFSMIndividual(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSMIndividual(fsm, (entry->next_state));
// Skip the decoded zero runs
rowptr = &rowptr[entry->pre_skip];
// Write down the first decoded magnitude
value = entry->value0;
rowptr[0] = SATURATE(value);
// Write down the second decoded magnitude
value = entry->value1;
rowptr[1] = SATURATE(value);
// Skip the decoded zero runs
rowptr = &rowptr[entry->post_skip];
}
}
#endif
// Decode the highpass band coefficients but do not write them out - used in SIF mode
// Walk the finite state machine over one highpass band without writing any
// coefficients out - used in SIF mode to consume the band from the bitstream.
// Returns true once the band end trailer has been decoded.
// NOTE(review): image, width, and height are unused; pitch is converted but
// then never read. The parameters are kept for interface parity with the
// decoding variants.
bool SkipBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
int codeword;
FSMENTRY *entry;
pitch /= sizeof(PIXEL8S);
// Each bitstream byte is consumed as two 4-bit FSM indices
assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Step the state machine until the band end trailer appears
for (;;)
{
int half;
// Fetch the next byte from the bitstream
codeword = GetFastByte(stream);
// Process the high nibble first, then the low nibble
for (half = 0; half < 2; half++)
{
int fsm_index;
if (half == 0)
fsm_index = codeword >> FSM_INDEX_SIZE;
else
fsm_index = codeword & ((1<<FSM_INDEX_SIZE)-1);
// Look up the entry for this nibble in the current state
entry = GetFSMTableEntry(fsm, fsm_index);
// The trailer value terminates the subband
if (entry->value0 == BAND_END_TRAILER) {
ResetFSM(fsm);
return true;
}
// Advance to the next state
UpdateFSM(fsm, (int)entry->next_state);
}
}
}
#if _TIMING
extern TIMER tk_fastruns;
#endif
#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
// Decode one 8-bit highpass band with the FSM decoder (disabled legacy path).
// Validates the wavelet/FSM state, then delegates the actual bit decoding to
// DecodeBandFSM8sNoGap. Returns false and sets decoder->error on failure.
CODEC_ERROR error = CODEC_ERROR_OKAY;
FILE *logfile = decoder->logfile;
int result;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026
// All rows are treated as one long row that covers the entire band
int size = fsm->table.num_states;
PIXEL *rowptr;
int row = 0;
int pitch;
int pixel_type = wavelet->pixel_type[band_index];
decoder->codec.active_codebook = 0; // reset CODEC state
// Must have a valid wavelet
assert(wavelet != NULL);
if (wavelet == NULL) return false;
//Must have a valid FSM
assert(fsm != NULL);
if(fsm == NULL) return false;
// An FSM with no states cannot decode anything
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check if the band is intended for 8-bit pixels
assert(pixel_type == PIXEL_TYPE_8S);
START(tk_fastruns);
rowptr = (PIXEL *)wavelet->band[band_index];
pitch = wavelet->pitch8s; // Use the 8-bit pitch
//pitch = wavelet->pitch;
// The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
assert(0);
#endif
// Get one byte from the bitstream and decode 4 bits at a time
result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
#if (0 && DEBUG && _WINDOWS)
_CrtCheckMemory();
#endif
#if (0 && DEBUG)
if (logfile)
DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "DecodeFastRunsFSM8s, band index: %d\n", band_index);
DumpWaveletRow(wavelet, band_index, 0, logfile);
}
#endif
// NOTE(review): this label has no corresponding goto; it only marks the
// common exit so the timer is stopped on the success path.
end:
STOP(tk_fastruns);
return true;
}
#endif
#if _DEQUANTIZE_IN_FSM
// Restore the original (pre-dequantized) magnitude values in the FSM tables.
// The saved values were captured into fsm->restoreFSM by DeQuantFSM the first
// time the tables were scaled. The quant argument is unused; it is kept so the
// signature matches the call site in DeQuantFSM.
void ReQuantFSM(FSM *fsm, int quant)
{
int count = 0;
int i, j;
short *restore = &fsm->restoreFSM[0];
#if !_INDIVIDUAL_ENTRY
// Tables are stored per state, (1 << FSM_INDEX_SIZE) entries each
for (i = 0; i < fsm->table.num_states; i++)
{
FSMENTRY *entry = fsm->table.entries[i];
for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
{
entry[j].value0 = restore[count++];
entry[j].value1 = restore[count++];
}
}
#else
// Individual entries: a flat array of entry pointers, some of which are NULL
for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
{
// Fix: was "fsm_table.entries_ind[i]", an undeclared identifier that fails
// to compile when _INDIVIDUAL_ENTRY is defined; use the same access as the
// matching loops in DeQuantFSM.
FSMENTRY *entry = fsm->table.entries_ind[i];
if(entry)
{
entry->value0 = restore[count++];
entry->value1 = restore[count++];
}
}
#endif
}
void DeQuantFSM(FSM *fsm, int quant)
{
// Scale the magnitude values stored in the FSM tables by the quantization
// factor so that dequantization happens for free during FSM decoding.
// On first use the unscaled values are saved into fsm->restoreFSM; if the
// tables were previously scaled by a different factor they are restored via
// ReQuantFSM before being rescaled. A no-op when quant equals the last factor.
int i, j;
if(fsm->LastQuant > 1 && fsm->LastQuant != quant)
{
// Undo the previous scaling before applying the new factor
ReQuantFSM(fsm, fsm->LastQuant);
}
else if(fsm->LastQuant == quant)
{
// Tables already scaled by this factor - nothing to do
return;
}
if(fsm->InitizedRestore == 0)
{
// First call: snapshot the unscaled values so they can be restored later
short *restore = &fsm->restoreFSM[0];
int count = 0;
#if !_INDIVIDUAL_ENTRY
for (i = 0; i < fsm->table.num_states; i++)
{
FSMENTRY *entry = fsm->table.entries[i];
for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
{
restore[count++] = entry[j].value0;
restore[count++] = entry[j].value1;
}
}
#else
for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
{
FSMENTRY *entry = fsm->table.entries_ind[i];
if(entry)
{
restore[count++] = entry->value0;
restore[count++] = entry->value1;
}
}
#endif
fsm->InitizedRestore = 1;
}
// Scale every table entry; value0 is left alone when it holds a marker code
#if !_INDIVIDUAL_ENTRY
for (i = 0; i < fsm->table.num_states; i++)
{
FSMENTRY *entry = fsm->table.entries[i];
for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
{
if(entry[j].value0 < 0x7ff0) // band end trailer
entry[j].value0 *= quant;
entry[j].value1 *= quant;
}
}
#else
for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
{
FSMENTRY *entry = fsm->table.entries_ind[i];
if(entry)
{
if(entry->value0 < 0x7ff0) // band end trailer etc
entry->value0 *= quant;
entry->value1 *= quant;
}
}
#endif
// Remember the factor so repeat calls with the same quant are cheap
fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height, int threading)
{
// Decode one 16-bit highpass band using the FSM run-length decoder.
// When threading is enabled (and the thread pool has more than one thread)
// the bitstream state is queued for a worker thread and this routine skips
// ahead to the end of the subband; otherwise the band is decoded inline,
// optionally applying peak-table substitution and difference (delta) decoding.
// Returns false and sets decoder->error on failure.
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
int result = true;
int quant = wavelet->quantization[band_index];
int active_codebook = decoder->codec.active_codebook;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm[active_codebook];
int size;
PIXEL *rowptr;
//int row = 0;
int pitch;
CODEC_STATE *codec = &decoder->codec;
//int channel = codec->channel;
//int subband = codec->band.subband;
//int num_subbands = codec->num_subbands;
//int pixel_type = wavelet->pixel_type[band_index];
int difference_coding = decoder->codec.difference_coding;
//int localquant = 1;
int peaklevel = 0;
//int peaksize = 0;
PIXEL *peakbase = NULL;
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n",
subband, decoder->codec.active_codebook, difference_coding);
}
#endif
decoder->codec.active_codebook = 0; // reset CODEC state
decoder->codec.difference_coding = 0; //reset state for next subband
// Must have a valid wavelet
assert(wavelet != NULL);
if (wavelet == NULL) return false;
//Must have a valid FSM
assert(fsm != NULL);
if(fsm == NULL) return false;
// All rows are treated as one long row that covers the entire band
size = fsm->table.num_states;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check that the band contains 16-bit signed pixels
assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);
START(tk_fastruns);
rowptr = (PIXEL *)wavelet->band[band_index];
//pitch = wavelet->pitch8s; // Use the 8-bit pitch
pitch = wavelet->pitch;
// Peak table: values above peaklevel are replaced from this side table
peaklevel = codec->peak_table.level;
peakbase = codec->peak_table.base;
#if _THREADED
// Only thread when the entropy pool actually has more than one worker
threading = decoder->entropy_worker_new.pool.thread_count > 1 ? threading : 0;
if(threading)
{
decoder->entropy_worker_new.threads_used = 1;
{
// Queue a snapshot of the bitstream and band parameters for a worker
//int start = stream->nWordsUsed;
int end;
struct entropy_data_new *data;
int next_queue_num = decoder->entropy_worker_new.next_queue_num++;
data = &decoder->entropy_worker_new.entropy_data[next_queue_num];
memcpy(&data->stream,stream, sizeof(BITSTREAM));
data->rowptr = rowptr;
data->width = width;
data->height = height;
data->pitch = pitch;
data->peaks = peakbase;
data->level = peaklevel;
data->quant = quant;
data->wavelet = wavelet;
data->band_index = band_index;
data->active_codebook = active_codebook;
data->difference_coding = difference_coding;
// Start only a particular threadid
if(next_queue_num == 0)
{
ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1);
#if _DELAYED_THREAD_START==0
ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
}
else
{ // Set the work count to the number of rows to process
ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1);
}
{
// Advance the caller's stream past this subband. If an optional
// subband-size tag is present the chunk size tells us how far to skip;
// otherwise scan forward for the band end marker word.
// NOTE(review): 0x00003800 appears to be the band-end marker word and
// the -8..-5 offsets assume the tag was just read - confirm against
// the bitstream layout before changing.
unsigned short tag = *(stream->lpCurrentWord-8) << 8;
if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE))
{
int chunksize;
int value = *(stream->lpCurrentWord-6) << 8;
value |= *(stream->lpCurrentWord-5);
tag |= *(stream->lpCurrentWord-7);
tag = NEG(tag);
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag&0xff)<<16);
chunksize *= 4;
chunksize -= 8;
{
// Verify the chunk really ends at a band end marker before trusting it
uint32_t *ptr = (uint32_t *)stream->lpCurrentWord;
ptr += (chunksize>>2);
if(*ptr != 0x00003800) // bandend
{
goto continuesearch;
}
}
stream->lpCurrentWord += chunksize;
stream->nWordsUsed -= chunksize;
end = stream->nWordsUsed;
}
else
{
continuesearch:
// No size tag: scan word by word for the band end marker
while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend
{
stream->lpCurrentWord += 4;
stream->nWordsUsed -= 4;
}
end = stream->nWordsUsed;
}
}
}
}
else
#endif // _THREADED
{
// Inline (non-threaded) decode path
DeQuantFSM(fsm, quant);
if (peaklevel)
{
result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1);
}
else
{
#if _DEBUG
result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile);
#else
result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch);
#endif
}
if(difference_coding)
{
// Undo horizontal delta coding: each pixel is a difference from its
// left neighbor, so integrate across each row
int x,y;
PIXEL *line = rowptr;
for(y=0;y<height;y++)
{
for(x=1;x<width;x++)
{
line[x] += line[x-1];
}
line += pitch/2;
}
}
if (result)
{
// Call thread safe routine to update the band valid flags
UpdateWaveletBandValidFlags(decoder, wavelet, band_index);
}
}
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
//end:
STOP(tk_fastruns);
return true;
}
bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height)
{
// Consume one encoded 8-bit band from the bitstream without writing the
// decoded coefficients (SIF mode). Validates decoder state, then delegates
// to SkipBandFSM. Returns false and sets decoder->error on failure.
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
int result;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026
// All rows are treated as one long row that covers the entire band
int size = fsm->table.num_states;
PIXEL *rowptr;
//int row = 0;
int pitch;
//int pixel_type = wavelet->pixel_type[band_index];
decoder->codec.active_codebook = 0; // reset CODEC state
// Must have a valid wavelet
assert(wavelet != NULL);
if (wavelet == NULL) return false;
//Must have a valid FSM
assert(fsm != NULL);
if(fsm == NULL) return false;
// An FSM with no states cannot decode anything
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check if the band is 8bit/pixel
assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S);
START(tk_fastruns);
rowptr = (PIXEL *)wavelet->band[band_index];
pitch = wavelet->pitch8s; // Use the 8-bit pitch
// The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
assert(0);
#endif
#if 1 // Get one byte from the bitstream and decode 4 bits at a time
result = SkipBandFSM(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
#endif
#if (0 && DEBUG && _WINDOWS)
_CrtCheckMemory();
#endif
#if (0 && DEBUG)
if (logfile)
DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
//end:
STOP(tk_fastruns);
return true;
}
// The third version is also based on the finite state machine decoder with
// gaps between rows encoded as zero runs, but dequantization is performed as
// the highpass values are read from the bitstream and placed into a row buffer.
// The highpass values are not written into the wavelet highpass band.
// Eventually this routine will be merged into the routine DecodeTemporalBand8s
// since this routine contains code specific to the inverse temporal transform
// and DecodeTemporalBand8s has become a shell.
#if 0
bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height,
IMAGE *frame0, IMAGE *frame1)
{
// Disabled legacy path: decode an 8-bit temporal highpass band row by row
// into a scratch buffer, dequantizing as the values arrive, and immediately
// apply the inverse temporal transform against the lowpass band to produce
// the even and odd output frames. The highpass values are never written into
// the wavelet band itself. Each decoded FSM entry can carry zero, one, or two
// magnitudes; row boundaries are detected by the running column crossing
// row_width, at which point the current row is inverted and the buffer reset.
CODEC_ERROR error = CODEC_ERROR_OKAY;
FILE *logfile = decoder->logfile;
int result;
// Get the pointer to the finite state machine
FSM *fsm = &decoder->fsm;
// All rows are treated as one long row that covers the entire band
int size = fsm->table.num_states;
PIXEL *lowpass = wavelet->band[0];
int lowpass_pitch = wavelet->pitch;
//PIXEL8S *rowptr;
int row = 0;
int pitch;
int row_width; // Width of the encoded row of highpass coefficients
PIXEL *even = frame0->band[0];
PIXEL *odd = frame1->band[0];
int even_pitch = frame0->pitch;
int odd_pitch = frame1->pitch;
int pixel_type = wavelet->pixel_type[band_index];
int quantization = wavelet->quantization[band_index];
PIXEL *buffer;
size_t buffer_size;
int index, byte;
FSMENTRY *entry;
int column = 0;
int32_t value;
int buffer_row_size;
PIXEL *highpass;
// Check that the wavelet into which the band will be decoded is valid
assert(wavelet != NULL);
if (wavelet == NULL) return false;
// Check that the finite state machine is valid
assert(fsm != NULL);
if (fsm == NULL) return false;
assert(size > 0);
if (size == 0) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
// Check that the band was encoded using 8-bit signed coefficients
assert(pixel_type == PIXEL_TYPE_8S);
pitch = wavelet->pitch8s; // Use the pitch for 8-bit packed rows
// Get the buffer for storing one row of dequantized highpass coefficients
buffer = (PIXEL *)decoder->buffer;
buffer_size = decoder->buffer_size;
// The finite state machine does not support a marker at the end of each row
assert(RUNS_ROWEND_MARKER == 0);
/***** Start of code included from DecodeBandFSM8s() *****/
// Check that one byte can be processes as two 4-bit nibbles
assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE));
// Check that the bitstream buffer is empty
assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);
// Convert the pitch to units of pixels
pitch /= sizeof(PIXEL8S);
buffer_row_size = pitch * sizeof(PIXEL);
lowpass_pitch /= sizeof(PIXEL);
even_pitch /= sizeof(PIXEL);
odd_pitch /= sizeof(PIXEL);
// Compute the address of the row after the last row in the band
//maxptr = rowptr + height * pitch;
// Round up the row length (in bytes) to a multiple of 16 bytes
//row_size = ALIGN16(row_size);
// Check that the buffer is large enough to hold one row
//assert(buffer_size >= row_size);
assert(buffer_size >= buffer_row_size);
// Use the buffer for the row or highpass coefficients
highpass = buffer;
#if 1
// The row spans the allocated width (pitch) of the band in no gap mode
row_width = pitch;
#else
// For debugging
row_width = wavelet->encoded_pitch/sizeof(PIXEL8S);
#endif
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
// Decode zero runs and magnitude values (with appended sign bit)
// until the marker for the band end trailer has been decoded
for (;;)
{
// Read a byte from the bitstream
byte = GetFastByte(stream);
/***** Decode the first 4-bit nibble *****/
// Decode the first 4-bit nibble
index = byte >> FSM_INDEX_SIZE;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return when the entire band is decoded
if (entry->value0 == BAND_END_TRAILER)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Process the rest of the subband
// (remaining rows are all zero highpass, so invert with a zero buffer)
ZeroHighPassRow(highpass, buffer_row_size);
while (++row < height)
{
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
}
ResetFSM(fsm);
return true;
}
// set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
// Fill in the decoded magnitude
// Check the column before storing the value
assert(0 <= column && column < row_width);
// Dequantize the value and store it in the highpass row buffer
highpass[column] = quantization * value;
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < row_width);
if (column < (row_width - 1)) {
// Store both values in the current row
highpass[column++] = quantization * entry->value0;
highpass[column++] = quantization * entry->value1;
}
else {
// The pair straddles a row boundary: store the first value, finish
// the row, then store the second value at the start of the next row
value = entry->value0;
highpass[column] = quantization * value;
value = entry->value1;
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
column = 0;
highpass[column++] = quantization * value;
}
}
/***** Decode the second 4-bit nibble *****/
// Decode the second 4-bit nibble
index = byte & FSM_INDEX_MASK;
// Index into the lookup table at that state
entry = GetFSMTableEntry(fsm, index);
// Return if the subband is decoded completely
if (entry->value0 == BAND_END_TRAILER)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Process the rest of the subband
ZeroHighPassRow(highpass, buffer_row_size);
while (++row < height)
{
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
}
ResetFSM(fsm);
return true;
}
// Set the pointer to the next state
UpdateFSM(fsm, (int)entry->next_state);
// If no magnitude value is decoded
if (entry->value0 == 0)
{
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there is only one decoded magnitude value
else if (entry->value1 == 0)
{
value = entry->value0;
column += entry->pre_skip;
// The run length scan can go past the end of the row if the row ends
// with a run of zeros and the next row begins with a run of zeros
// Did the scan go beyond the end of the row?
while (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
// Fill in the decoded magnitude
// Check the column before storing the value
//assert(index < width);
assert(0 <= column && column < row_width);
highpass[column] = quantization * value;
column += entry->post_skip;
// Did the scan go beyond the end of the row?
if (column >= row_width)
{
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Compute the starting column for the next row
column -= row_width;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
}
}
// If there are two decoded magnitude values
else
{
// Check the column before storing values
assert(0 <= column && column < row_width);
if (column < (row_width - 1)) {
// Store both highpass values in the current row
highpass[column++] = quantization * entry->value0;
highpass[column++] = quantization * entry->value1;
}
else {
// The pair straddles a row boundary (see the matching case above)
highpass[column] = quantization * entry->value0;
value = entry->value1;
// Dequantize the highpass coefficients
//DequantizeBandRow(rowptr, width, quantization, highpass);
// Apply the inverse temporal transform to the current row
InvertTemporalRow16s(lowpass, highpass, even, odd, width);
// Advance to the next lowpass input row
lowpass += lowpass_pitch;
// Advance to the next even and odd output rows
even += even_pitch;
odd += odd_pitch;
// Advance to the next row
row++;
// Clear the highpass buffer before decoding the non-zero coefficients
ZeroHighPassRow(highpass, buffer_row_size);
column = 0;
highpass[column++] = quantization * value;
}
}
}
/***** End of the code included from DecodeBandFSM8s() *****/
#if 0
assert(result == true);
if (result != true) {
decoder->error = CODEC_ERROR_RUN_DECODE;
return false;
}
#endif
#if (0 && DEBUG && _WINDOWS)
_CrtCheckMemory();
#endif
#if (0 && DEBUG)
if (logfile)
DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif
#if 0
end:
return true;
#endif
}
#endif
/***** End of the code for the finite state machine decoder *****/
#if 1
// The second version applies the horizontal inverse filters row by row, so the
// memory access pattern is more efficient. The lowpass and highpass temporal
// coefficients for each row are inverted and packed into the output in one pass.
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
// Apply the inverse horizontal-temporal transform and pack the output into a buffer.
//
// For each pair of output rows: the inverse horizontal filter is applied to the
// temporal lowpass and highpass bands of every channel, then the inverse temporal
// transform is applied and the reconstructed pixels are packed as interlaced YUV
// (two output rows per iteration, hence the field pitch of 2 * output_pitch).
//
// transform[]    - per-channel transform state holding the frame wavelets
// frame_index    - which frame wavelet to invert
// num_channels   - number of color channels (luma plus chroma)
// output         - packed YUV output buffer
// output_pitch   - pitch of the output buffer in bytes
// frame          - dimensions and decoded format of the reconstructed frame
// scratch        - scratch space used for the temporal row buffers
// chroma_offset  - offset added to the chroma values during packing
// precision      - CODEC_PRECISION_8BIT or CODEC_PRECISION_10BIT
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
                                uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                const SCRATCH *scratch, int chroma_offset, int precision)
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;

    int output_width;
    int channel;
    int row;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
#if DEBUG
    assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
        // Intermediate line buffer placed after the temporal row buffers
        PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];
            //int pitch8s = horizontal_pitch8s[channel];

            // Invert the horizontal transform applied to the temporal lowpass row
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                  temporal_lowpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Invert the horizontal transform applied to the temporal highpass row
            //DAN20051004 -- possible reversiblity issue
            //InvertHorizontalRow8sBuffered //----------------------- Maybe bad
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                                  temporal_highpass[channel],
                                                  horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;
        }

        // The output width is twice the width of the wavelet bands
        output_width = 2 * horizontal_width[0];

        // Adjust the frame width to fill to the end of each row
        //frame_width = output_pitch / 2;

        if (precision == CODEC_PRECISION_10BIT)
        {
            // Invert the temporal bands from all channels and pack output pixels
            switch (frame->format)
            {
            // Need to reduce the resolution from 10 bits to 8 bits during the inverse
            case DECODED_FORMAT_YUYV:
                InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                                 output, output_pitch, output_width, frame_width,
                                                 chroma_offset);
                break;

            case DECODED_FORMAT_UYVY:
                InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
                                                  output, output_pitch, output_width, frame_width,
                                                  chroma_offset);
                break;

            default:
                assert(0);
                break;
            }
        }
        else // Older code for 8-bit precision
        {
            int format;

            assert(precision == CODEC_PRECISION_8BIT);

            switch (frame->format)
            {
            case DECODED_FORMAT_YUYV:
                format = COLOR_FORMAT_YUYV;
                break;

            case DECODED_FORMAT_UYVY:
                format = COLOR_FORMAT_UYVY;
                break;

            default:
                // Previously there was no default case, so an unexpected decoded
                // format left the variable uninitialized (undefined behavior).
                // Assert in debug builds and fall back to a deterministic format.
                assert(0);
                format = COLOR_FORMAT_UYVY;
                break;
            }

            // Invert the temporal bands from all channels and pack output pixels
            InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                        output, output_pitch, output_width, frame_width,
                                        chroma_offset, format);
        }

        // Advance to the next row in the packed output image
        output += field_pitch;
    }
}
#endif
#if _INTERLACED_WORKER_THREADS
// Worker-thread version of the inverse horizontal-temporal transform to YUV.
//
// Each worker thread polls the row semaphore and claims the next unprocessed
// row index (protected by a critical section), inverts the horizontal transform
// for that row in every channel, applies the inverse temporal transform, and
// packs two interlaced output rows.  The loop exits when the semaphore poll
// fails or the claimed row index is out of range.
//
// decoder       - decoder state providing the transforms, scratch space and
//                 the interlaced worker synchronization objects
// thread_index  - index of this worker thread (used to partition scratch space)
// frame_index   - which frame wavelet to invert
// num_channels  - number of color channels
// output        - packed YUV output buffer (base of the frame)
// output_pitch  - pitch of the output buffer in bytes
// frame         - dimensions and decoded format of the reconstructed frame
// chroma_offset - offset added to the chroma values during packing
// precision     - CODEC_PRECISION_8BIT or CODEC_PRECISION_10BIT
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
                                       uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                       int chroma_offset, int precision)
{
    FILE *logfile = decoder->logfile;
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;

    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    uint8_t *output_row_ptr = output;

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;

    int output_width;
    int channel;
    int row;

    HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
    int return_value;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Divide the buffer space between the four threads
    buffer_size /= 4;
    buffer += buffer_size * thread_index;

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
    assert((2 * num_channels * temporal_row_size) <= buffer_size);

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
    }
#endif

/*  if (thread_index == 0)
    {
        row = 0;
        row_step = 1;
    }
    else if (thread_index == 1)
    {
        row = half_height - 1;
        row_step = -1;

        // Move to the bottom of the transform and process moving up
        for (channel = 0; channel < num_channels; channel++)
        {
            int offset = horizontal_pitch[channel] * (half_height - 1);

            horizontal_lowlow[channel] += offset;
            horizontal_lowhigh[channel] += offset;
            horizontal_highlow[channel] += offset;
            horizontal_highhigh[channel] += offset;

            horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
            horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
        }

        output += field_pitch * (half_height - 1);
        field_pitch = NEG(field_pitch);
    }
    else
    {
        assert(0);  // what about middle threads?
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
            thread_index, row, row_step, field_pitch);
    }
#endif
*/

    // Loop until all of the rows have been processed
    for (;;)
    {
        // Wait for one row from each channel to invert the transform
        return_value = WaitForSingleObject(row_semaphore, 0);

        // Determine the index of this worker thread
        if (return_value == WAIT_OBJECT_0)
        {
            // Claim the next row index under the lock so threads never collide
            if(decoder->interlaced_worker.lock_init)
            {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }
            row = decoder->interlaced_worker.current_row++;
            if(decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);

            // Each claimed row produces two interlaced output rows
            output_row_ptr = output;
            output_row_ptr += row * 2 * output_pitch;

            // Position the band pointers at the claimed row in every channel
            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                IMAGE *wavelet = transform[channel]->wavelet[frame_index];

                horizontal_lowlow[channel] = wavelet->band[LL_BAND];
                horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
                horizontal_highlow[channel] = wavelet->band[HL_BAND];
                horizontal_highhigh[channel] = wavelet->band[HH_BAND];

                horizontal_lowlow[channel] += pitch*row;
                horizontal_lowhigh[channel] += pitch*row;
                horizontal_highlow[channel] += pitch*row;
                horizontal_highhigh[channel] += pitch*row;
            }
        }

        if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
        {
            //PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
            PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);

            // assert(0 <= row && row < half_height);
#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Processing row: %d, thread index: %d, output: %d (0x%p)\n",
                    row, thread_index, output_row_ptr);
            }
#endif
            // Invert the horizontal transform applied to the temporal bands in each channel
            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                //int pitch8s = horizontal_pitch8s[channel];

#if (0 && DEBUG)
                // Invert the horizontal transform by duplicating the lowpass pixels
                InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                 (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                 temporal_lowpass[channel], horizontal_width[channel],
                                                 (PIXEL *)line_buffer);
#else
                // Invert the horizontal transform applied to the temporal lowpass row
                InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                      (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                      temporal_lowpass[channel],
                                                      horizontal_width[channel],
                                                      (PIXEL *)line_buffer);
#endif
                // Invert the horizontal transform applied to the temporal highpass row
                InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
                                              (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                              temporal_highpass[channel],
                                              horizontal_width[channel],
                                              (PIXEL *)line_buffer);

                // Advance to the next row in each horizontal band in this channel
                //horizontal_lowlow[channel] += pitch;
                //horizontal_lowhigh[channel] += pitch;
                //horizontal_highlow[channel] += pitch;
                //horizontal_highhigh[channel] += pitch;
            }

            // The output width is twice the width of the wavelet bands
            output_width = 2 * horizontal_width[0];

            // Adjust the frame width to fill to the end of each row
            //frame_width = output_pitch / 2;

            if (precision == CODEC_PRECISION_10BIT)
            {
                // Invert the temporal bands from all channels and pack output pixels
                switch (frame->format)
                {
                // Need to reduce the resolution from 10 bits to 8 bits during the inverse
                case DECODED_FORMAT_YUYV:
                    InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                                     output_row_ptr, output_pitch, output_width, frame_width,
                                                     chroma_offset);
                    break;

                case DECODED_FORMAT_UYVY:
                    InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
                                                      output_row_ptr, output_pitch, output_width, frame_width,
                                                      chroma_offset);
                    break;

                default:
                    assert(0);
                    break;
                }
            }
            else // Older code for 8-bit precision
            {
                int format;

                assert(precision == CODEC_PRECISION_8BIT);

                switch (frame->format)
                {
                case DECODED_FORMAT_YUYV:
                    format = COLOR_FORMAT_YUYV;
                    break;

                case DECODED_FORMAT_UYVY:
                    format = COLOR_FORMAT_UYVY;
                    break;

                default:
                    // Previously there was no default case, so an unexpected decoded
                    // format left the variable uninitialized (undefined behavior).
                    // Assert in debug builds and fall back to a deterministic format.
                    assert(0);
                    format = COLOR_FORMAT_UYVY;
                    break;
                }

                // Invert the temporal bands from all channels and pack output pixels
                InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                            output_row_ptr, output_pitch, output_width, frame_width,
                                            chroma_offset, format);
            }

            // Advance to the next row in the input transforms
            //row += row_step;

            // Advance to the next row in the packed output image
            //output += field_pitch;
        }
        else
        {
            // No more rows to process
            break;
        }
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
    }
#endif
}
#endif
//#if BUILD_PROSPECT
// Apply the inverse horizontal-temporal transform and output rows of luma and chroma
#if 0
void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels,
                                   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
                                   char *buffer, size_t buffer_size, int chroma_offset,
                                   int precision)
#else
// Apply the inverse horizontal-temporal transform and output rows of 16-bit
// luma and chroma.  For each pair of output rows the inverse horizontal filter
// (or the half-horizontal bypass) is applied to the temporal bands of every
// channel, then the inverse temporal transform writes two rows of 16-bit
// planar output per channel, packed channel after channel within each row.
void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels,
                                   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
                                   const SCRATCH *scratch, int chroma_offset,
                                   int precision)
#endif
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

    // NOTE(review): the per-band quantization arrays that other transform
    // routines keep were declared and filled here but never read by any call
    // in this function, so they have been removed as dead locals.

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Buffers for the rows in the temporal wavelet (reused for each channel)
    PIXEL *temporal_lowpass;
    PIXEL *temporal_highpass;

    int output_row_width[TRANSFORM_MAX_CHANNELS];

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;

    int luma_width = frame_width;
    int chroma_width = luma_width/2;

    int channel;
    int row;

#if (1 && DEBUG_ROW16U)
    PIXEL16U *output_buffer;
#endif

    // This routine should only be called to decode rows of 16-bit luma and chroma
    //assert(frame->format == DECODED_FORMAT_YR16);

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
    // plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
    assert((2 * temporal_row_size) <= buffer_size);
#endif

    // Allocate buffers for one row of lowpass and highpass temporal coefficients
    temporal_lowpass = (PIXEL *)&buffer[0];
    temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
    output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

    // Initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        int static count = 0;
        if (count < 20) {
            char label[_MAX_PATH];
            int i;
            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);
            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        // Compute the width of each row of output pixels
        output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
#if (1 && DEBUG_ROW16U)
        PIXEL16U *output_row_ptr = output_buffer;
        PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
        int planar_pitch[TRANSFORM_MAX_CHANNELS];
        ROI strip = {luma_width, 2};
        uint8_t *yuv_output = (uint8_t *)output;
        uint8_t *output1 = yuv_output;
        uint8_t *output2 = yuv_output + output_pitch;
#else
        PIXEL16U *output_row_ptr = output;
#endif
        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];

            if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
            {
                // Invert the horizontal transform applied to the temporal lowpass row
                BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                       temporal_lowpass, horizontal_width[channel]);

                // Invert the horizontal transform applied to the temporal highpass row
                BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                       temporal_highpass, horizontal_width[channel]);
            }
            else
            {
                // Invert the horizontal transform applied to the temporal lowpass row
                InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                       temporal_lowpass, horizontal_width[channel]);

                // Invert the horizontal transform applied to the temporal highpass row
                InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                       temporal_highpass, horizontal_width[channel]);
            }

            //***DEBUG***
            //ZeroMemory(temporal_highpass, temporal_row_size);
            //FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;

#if (1 && DEBUG_ROW16U)
            // Write the rows of 16-bit pixels to a temporary buffer
            planar_output[channel] = output_row_ptr;
            planar_pitch[channel] = output_pitch * sizeof(PIXEL);

            // Invert the temporal transform and output two rows of luma or chroma
            InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
                                           planar_output[channel], planar_pitch[channel],
                                           output_row_width[channel],
                                           frame_width, chroma_offset, precision);

            //if (channel > 0)
            if (0)
            {
                uint8_t *output3 = (uint8_t *)planar_output[channel];
                uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
                int output_size = output_row_width[channel] * sizeof(PIXEL);
                int fill_value = (128 << 8);
                //ZeroMemory(output3, output_size);
                //ZeroMemory(output4, output_size);
                FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
                FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
            }
#else
            // Invert the temporal transform and output two rows of luma or chroma
            InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
                                           output_row_ptr, output_pitch, output_row_width[channel],
                                           frame_width, chroma_offset, precision);
#endif
            // Advance the output row pointer to the next channel
            output_row_ptr += output_row_width[channel];

            // Check the output row alignment
            assert(ISALIGNED16(output_row_ptr));
        }

        // Advance to the next group of rows in the output image
        output += field_pitch/sizeof(PIXEL16U);
    }
}
//#endif
#if _INTERLACED_WORKER_THREADS
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
int chroma_offset, int precision)
{
FILE *logfile = decoder->logfile;
TRANSFORM **transform = decoder->transform;
const SCRATCH *scratch = &decoder->scratch;
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Buffers for the rows in the temporal wavelet (reused for each channel)
PIXEL *temporal_lowpass;
PIXEL *temporal_highpass;
int output_row_width[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int luma_width = frame_width;
int chroma_width = luma_width/2;
int channel;
int row;
HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
int return_value;
#if (1 && DEBUG_ROW16U)
PIXEL16U *output_buffer;
#endif
// This routine should only be called to decode rows of 16-bit luma and chroma
//assert(frame->format == DECODED_FORMAT_YR16);
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
#if 0
if (thread_index == 1)
{
// Skip over the buffer space used by the other thread
size_t buffer_usage = 2 * temporal_row_size;
buffer += buffer_usage;
buffer_size -= buffer_usage;
}
#else
// Divide the buffer space between the two threads
buffer_size /= 4;
buffer += buffer_size * thread_index;
#endif
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
// plus the buffer used by the inverse horizontal transform for its intermediate results
assert((2 * temporal_row_size) <= buffer_size);
// Allocate buffers for one row of lowpass and highpass temporal coefficients
temporal_lowpass = (PIXEL *)&buffer[0];
temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (1 && DEBUG_ROW16U)
output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
// Initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20) {
char label[_MAX_PATH];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Compute the width of each row of output pixels
output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
}
#endif
/* if (thread_index == 0)
{
row = 0;
row_step = 1;
}
else if (thread_index == 1)
{
row = half_height - 1;
row_step = -1;
// Move to the bottom of the transform and process moving up
for (channel = 0; channel < num_channels; channel++)
{
int offset = horizontal_pitch[channel] * (half_height - 1);
horizontal_lowlow[channel] += offset;
horizontal_lowhigh[channel] += offset;
horizontal_highlow[channel] += offset;
horizontal_highhigh[channel] += offset;
horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
//horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
}
//output += field_pitch * (half_height - 1);
output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
output_pitch = NEG(output_pitch);
field_pitch = NEG(field_pitch);
}
else
{
assert(0); // middle threads
}
*/
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
thread_index, row, row_step, field_pitch);
}
#endif
// Loop until all of the rows have been processed
for (;;)
{
PIXEL16U *output_row_ptr;
// Wait for one row from each channel to invert the transform
return_value = WaitForSingleObject(row_semaphore, 0);
// Determine the index of this worker thread
if (return_value == WAIT_OBJECT_0)
{
if(decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
row = decoder->interlaced_worker.current_row++;
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
output_row_ptr = output;
output_row_ptr += row * output_pitch;
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
horizontal_lowlow[channel] += pitch*row;
horizontal_lowhigh[channel] += pitch*row;
horizontal_highlow[channel] += pitch*row;
horizontal_highhigh[channel] += pitch*row;
}
}
if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
{
assert(0 <= row && row < half_height);
if(decoder->frame.resolution == DECODED_RESOLUTION_FULL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
}
}
else if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
output_row_ptr += output_row_width[channel];
}
}
}
else
{
// No more rows to process
break;
}
}
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
}
#endif
}
#endif
#if 0
DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param)
{
	// Layout of the argument packet handed to this worker thread
	struct data
	{
		TRANSFORM *transform[3];
		int frame_index;
		int num_channels;
		uint8_t *output;
		int output_pitch;
		FRAME_INFO *info;
		SCRATCH *scratch;
		int chroma_offset;
		int precision;
	};

	// Unpack the arguments and forward the call to the worker routine
	struct data *args = (struct data *)param;
	TransformInverseFrameToRow16utop(args->transform, args->frame_index, args->num_channels,
									 (PIXEL16U *)args->output, args->output_pitch, args->info,
									 args->scratch, args->chroma_offset, args->precision);
	return 0;
}
DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param)
{
	// Layout of the argument packet handed to this worker thread
	struct data
	{
		TRANSFORM *transform[3];
		int frame_index;
		int num_channels;
		uint8_t *output;
		int output_pitch;
		FRAME_INFO *info;
		SCRATCH *scratch;
		int chroma_offset;
		int precision;
	};

	// Unpack the arguments and forward the call to the worker routine
	struct data *args = (struct data *)param;
	TransformInverseFrameToRow16ubottom(args->transform, args->frame_index, args->num_channels,
										(PIXEL16U *)args->output, args->output_pitch, args->info,
										args->scratch, args->chroma_offset, args->precision);
	return 0;
}
#endif
extern void fast_srand( int seed );
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
#if 0
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
								   uint8_t *output, int output_pitch, FRAME_INFO *frame,
								   char *buffer, size_t buffer_size, int chroma_offset,
								   int precision)
#else
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
								   uint8_t *output, int output_pitch, FRAME_INFO *frame,
								   const SCRATCH *scratch, int chroma_offset, int precision)
#endif
{
	// Pointers to the current row in each horizontal wavelet band (per channel)
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch (pitch in units of pixels)
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	//int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors for the four bands in each channel
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Pointers to the rows of temporal coefficients for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	// Round the temporal row size up to a whole number of cache lines so that
	// every temporal row starts on a cache line boundary.
	// BUG FIX: the alignment must be applied before the total size of the
	// temporal section is computed.  The original code computed
	// temporal_buffer_size from the unaligned row size and aligned the row
	// size afterwards, so the YUV work area below overlapped the last
	// temporal coefficient rows.
	size_t temporal_row_size = ALIGN(frame_width * sizeof(PIXEL), _CACHE_LINE_SIZE);
	size_t temporal_buffer_size = 2 * num_channels * temporal_row_size;
#if DEBUG
	size_t yuv_row_size = frame_width * 2;
#endif
	char *yuv_buffer;
	size_t yuv_buffer_size;

	// Two output rows are produced per transform row (two interlaced fields)
	int field_pitch = 2 * output_pitch;

	int format = frame->format;
	bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);
	int output_width;
	int channel;
	int row;

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough for the temporal rows
	assert(temporal_buffer_size <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Allocate buffer space for the intermediate YUV data
	yuv_buffer = buffer + temporal_buffer_size;
	yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
	assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif

	if (inverted)
	{
		// Plain RGB output is stored bottom-up: start at the last row and go up
		output += (frame_height - 1) * output_pitch;
		output_pitch = (- output_pitch);
		field_pitch = (- field_pitch);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// Scratch line used by the buffered horizontal inversion routines.
		// NOTE(review): this offset lands two rows past the start of
		// yuv_buffer; the scratch line is consumed before yuv_buffer is
		// written below, but the overlap should be confirmed against the
		// scratch-space budget.
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel],
												  horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
										  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
										  temporal_highpass[channel],
										  horizontal_width[channel],
										  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

//#if BUILD_PROSPECT
		if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
		{
			// Invert the temporal bands from all channels and pack as V210 output
			InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
										 output, output_pitch, output_width, frame_width,
										 yuv_buffer, yuv_buffer_size, format, chroma_offset, precision);
		}
		else
//#endif
		{
			// Invert the temporal bands from all channels and pack as 8-bit output
			InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
								   output, output_pitch, output_width, frame_width,
								   yuv_buffer, yuv_buffer_size, format, frame->colorspace,
								   chroma_offset, precision, row);
		}

		// Advance past the pair of output rows written for this transform row
		output += field_pitch;
	}
}
// Convert a single decoded image to the requested packed format.
// The plain RGB formats are stored bottom-up for display, so they are
// converted with the inverted flag set; the *_INVERTED variants are not.
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
	START(tk_convert);

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24,
						  (format == DECODED_FORMAT_RGB24));
		break;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32,
						  (format == DECODED_FORMAT_RGB32));
		break;

	case DECODED_FORMAT_YUYV:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, false);
		break;

	case DECODED_FORMAT_UYVY:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, false);
		break;

	default:
		// Unsupported format (return a blank frame)
		assert(0);
		{
			size_t blank_size = image->height * output_pitch;
			memset(output_buffer, COLOR_CHROMA_ZERO, blank_size);
		}
		break;
	}

	STOP(tk_convert);
}
// Pack the 10-bit lowpass bands of the three channels into 8-bit YUYV,
// averaging horizontal pairs of samples (used for side-by-side 3D output).
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width, int output_height,
							   int output_pitch, bool inverted)
{
	// Lowpass band of each channel (luma plus two chroma)
	PIXEL *y_line = images[0]->band[0];
	PIXEL *u_line = images[1]->band[0];
	PIXEL *v_line = images[2]->band[0];

	// Band pitch converted from bytes to pixels
	int y_step = images[0]->pitch / sizeof(PIXEL);
	int u_step = images[1]->pitch / sizeof(PIXEL);
	int v_step = images[2]->pitch / sizeof(PIXEL);

	int luma_width = images[0]->width;
	int num_rows = output_height;
	uint8_t *out_line = output_buffer;
	int line;

	// The output pitch should be a positive number before inversion
	assert(output_pitch > 0);

	// Should the image be inverted?
	if (inverted)
	{
		out_line += (num_rows - 1) * output_pitch;	// Start at the bottom row
		output_pitch = NEG(output_pitch);			// Negate the pitch to go up
	}

	for (line = 0; line < num_rows; line++)
	{
		uint8_t *out = out_line;
		int col;

		// Average horizontal pairs and descale the 10-bit values to 8 bits
		for (col = 0; col < luma_width; col += 4)
		{
			int cc = col >> 1;
			*(out++) = SATURATE_8U((y_line[col] + y_line[col + 1]) >> 5);
			// NOTE(review): images[2] is written into the first chroma slot and
			// images[1] into the second -- presumably the channel order is
			// Y, Cr, Cb; confirm against the encoder's channel layout
			*(out++) = SATURATE_8U((v_line[cc] + v_line[cc + 1]) >> 5);
			*(out++) = SATURATE_8U((y_line[col + 2] + y_line[col + 3]) >> 5);
			*(out++) = SATURATE_8U((u_line[cc] + u_line[cc + 1]) >> 5);
		}

		// Advance to the next rows in the input and output images
		y_line += y_step;
		u_line += u_step;
		v_line += v_step;
		out_line += output_pitch;
	}
}
// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer
// Dispatches on the decoded output format (info->format) and on the encoded
// source format (encode_format) to pick the matching conversion routine.
// The plain RGB formats are converted inverted (first row at the bottom).
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels, uint8_t *output_buffer, int32_t output_pitch,
							FRAME_INFO *info, int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
	//IMAGE *image = frame->channel[0];
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;

	// Number of bits to shift off to reduce the encoded precision to 8 bits
	int descale = precision - 8;

	// Get the color format from the decoded format
	int color_format = info->format & COLOR_FORMAT_MASK;

	// Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
	assert(0);
	return;
#endif

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)
	case DECODED_FORMAT_RGB24_INVERTED:
		// RGB 4:4:4 sources and YUV sources use different conversion routines
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
										COLOR_FORMAT_RGB24, info->colorspace, inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
											COLOR_FORMAT_RGB24, info->colorspace, inverted, descale);
		}
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)
	case DECODED_FORMAT_RGB32_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
										COLOR_FORMAT_RGB32, info->colorspace, inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
											COLOR_FORMAT_RGB32, info->colorspace, inverted, descale);
		}
		break;

	case DECODED_FORMAT_RG48:
		if(encode_format == ENCODED_FORMAT_BAYER)
		{
			// Demosaic the Bayer lowpass data into 16-bit RGB
			ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, 2, num_channels);
		}
		else if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			int scale = 1;
			// NOTE(review): this inner test repeats the enclosing condition,
			// so scale is always 2 on this path -- confirm whether a different
			// condition was intended here
			if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
				scale = 2;
			ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, scale, num_channels);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
										output_height, output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

	case DECODED_FORMAT_RG64:
		// 16-bit RGBA output is only supported from RGB 4:4:4(4) sources
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
											descale, num_channels, info->format & 0xffff);
		}
		else
		{
			assert(0);
		}
		break;

	// Deep RGB output formats share the same pair of conversion routines
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
											descale, num_channels, info->format & 0xffff);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
										output_height, output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		if (precision == CODEC_PRECISION_10BIT)
		{
			// Stereo (3D) decodes may interleave or stack the two channels,
			// which changes the effective line skip and pitch
			int lineskip = 1; // 3D Work
			int pitch = output_pitch;
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
			{
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
				{
					lineskip = 2;
					// NOTE(review): magic constant 3 -- presumably one of the
					// BLEND_* values above; confirm and replace with the name
					if(decoder->channel_blend_type == 3)
						pitch *= 2;
				}
			}
			if((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || decoder->channel_blend_type == BLEND_FREEVIEW) && decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
			{
				SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height, pitch, inverted);
			}
			else
			{
				//ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
				ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, color_format, inverted, lineskip);
			}
		}
		else
		{
			//ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted);
			ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, color_format, inverted);
		}
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
#if 0
	case DECODED_FORMAT_UYVY:
		ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_UYVY, inverted);
		break;
#endif

//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		// V210 output is only produced from 10-bit encodes
		if (precision == CODEC_PRECISION_10BIT)
		{
			ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_V210, inverted);
		}
		else
		{
			//ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted);
			assert(0);
		}
		break;
//#endif

	case DECODED_FORMAT_YU64:
		// DAN04262004
		ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YU64, inverted, precision);
		break;

//#if BUILD_PROSPECT
	case DECODED_FORMAT_YR16:
		ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YR16, inverted, precision);
		break;
//#endif

	default: // Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert one strip of planar YUV rows to the requested packed output format.
// The plain RGB formats are stored bottom-up for display, so they are
// converted with the inverted flag set; the *_INVERTED variants are not.
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch, int frame_width,
								   int format, int colorspace)
{
	int strip_width = roi.width;

#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif

	START(tk_convert);

#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							  COLOR_FORMAT_RGB24, colorspace, (format == DECODED_FORMAT_RGB24));
		break;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							  COLOR_FORMAT_RGB32, colorspace, (format == DECODED_FORMAT_RGB32));
		break;

	case DECODED_FORMAT_YUYV:
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
									  output_buffer, output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							   COLOR_FORMAT_UYVY, colorspace, false);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert a strip of 16-bit unsigned planar rows to the requested output
// format, dithering down where the output is 8-bit RGB.  The plain RGB
// formats are stored bottom-up, so they are converted with the inverted flag.
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch, int frame_width,
								   int format, int colorspace)
{
	int strip_width = roi.width;

	START(tk_convert);

	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
								   COLOR_FORMAT_RGB24, colorspace, (format == DECODED_FORMAT_RGB24));
		break;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
								   COLOR_FORMAT_RGB32, colorspace, (format == DECODED_FORMAT_RGB32));
		break;

	// All of the deep RGB output formats share one conversion routine
	case COLOR_FORMAT_WP13:
	case COLOR_FORMAT_B64A:
	case COLOR_FORMAT_RG48:
	case COLOR_FORMAT_R210:
	case COLOR_FORMAT_DPX0:
	case COLOR_FORMAT_RG30:
	case COLOR_FORMAT_AR10:
	case COLOR_FORMAT_AB10:
		ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch, format, colorspace, NULL, NULL);
		break;

	case DECODED_FORMAT_YUYV:
		// These routines are not yet updated for ROW16u inputs
		assert(0);
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
									  output_buffer, output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		// These routines are not yet updated for ROW16u inputs
		assert(0);
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
							   COLOR_FORMAT_UYVY, colorspace, false);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}
// Convert one row of packed YUYV pixels to the specified output format.
// length is the number of pixels in the row; precision is the bit depth of
// the input samples (only 8-bit input is supported for the packed outputs).
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision)
{
	// Size in bytes of one packed 4:2:2 row (two bytes per pixel)
	size_t row_size = 2 * length;

	START(tk_convert);

	// Determine the type of color conversion.
	// BUG FIX: the original code set a local `inverted` flag in fallthrough
	// cases that was never read (the row converters do not take it); the
	// dead local and the fallthrough assignments have been removed.
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
		break;

	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
		break;

	case DECODED_FORMAT_YUYV:
		if (precision == 8)
		{
			// Input is already in the output format
			memcpy(output, input, row_size);
		}
		else
		{
			// Would need to dither down to 8-bit
			assert(0);
		}
		break;

	case DECODED_FORMAT_UYVY:
		if (precision == 8)
		{
			ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
		}
		else
		{
			// Would need to dither down to 8-bit
			assert(0);
		}
		break;

	//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		assert(0);		// Should not get here with 8-bit data
		//ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
		break;

	case DECODED_FORMAT_YU64:
		assert(0);		// Should not get here with 8-bit data
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;

	case DECODED_FORMAT_BYR3:
	case DECODED_FORMAT_BYR4:
		assert(0);		// Should not get here with 8-bit data
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;
	//#endif

	default:
		// Unsupported format (output a blank row)
		assert(0);
		memset(output, 0, row_size);
		break;
	}

	STOP(tk_convert);
}
#if _THREADED_DECODER
// Fetch and (re)allocate the wavelet at the given index in the transform,
// holding the decoder's entropy-worker lock while the transform's wavelet
// slot is read and written so concurrent worker threads stay consistent.
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
							int width, int height, int level, int type)
{
	// NOTE(review): transform is dereferenced here before the NULL check
	// below -- confirm that callers never pass a NULL transform
	IMAGE *wavelet = transform->wavelet[index];

	assert(decoder != NULL && transform != NULL);
	if (decoder != NULL && transform != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif
		// Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Lock(&decoder->entropy_worker_new.lock);
#endif
		// Get the wavelet from the transform data structure (thread safe)
		wavelet = transform->wavelet[index];

		// Allocate (or reallocate) the wavelet to the requested dimensions
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;

		// Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}

	return wavelet;
}
// Update the codec state with the information in a tag value pair
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
bool optional = false;
int chunksize = 0;
bool result;
// Is this an optional tag?
if (tag < 0) {
tag = NEG(tag);
optional = true;
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n",
tag, value, optional);
}
#endif
switch (tag)
{
case CODEC_TAG_ZERO: // Used internally
assert(0); // Should not occur in the bitstream
error = CODEC_ERROR_INVALID_BITSTREAM;
break;
case CODEC_TAG_SAMPLE: // Type of sample
//assert(0);
if (value == SAMPLE_TYPE_CHANNEL)
{
result = DecodeSampleChannelHeader(decoder, input);
if (!result)
error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_INDEX: // Sample index table
//assert(0); // Need to figure out how to return the group index
{
int count = value;
uint32_t *index = (uint32_t *)(&codec->channel_size[0]);
DecodeGroupIndex(input, index, count);
codec->num_channels = count;
}
break;
case CODEC_TAG_SUBBAND: // Has the decoder encountered a subband?
{ // This tag is obsolete and not used in modern streams
int subband = value;
// Check that the subband number makes sense
assert(0 <= subband && subband <= codec->max_subband);
if (! (0 <= subband && subband <= codec->max_subband))
{
error = CODEC_ERROR_DECODING_SUBBAND;
break;
}
// Decompress the subband
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR: // Band divisor. this is last TAG before subband data so act.
codec->band.divisor = value; // This tag value pair encodes the band divisor which is obsolete
{
// This tag value pair marks the beginning of the encoded coefficients
// The subband number has already been decoded
int subband = codec->band.subband;
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_ENTRY: // Entry in sample index
assert(0); // Need to figure out how to return the group index
break;
case CODEC_TAG_MARKER: // Bitstream marker
{
int marker = value;
uint8_t *current_position;
// Save the current bitstream position
current_position = GetBitstreamPosition(input);
current_position -= 4; // Step back to before the GetSegment i.e. the TAG
if (IsLowPassHeaderMarker(marker))
{
// Save the bitstream position for the start of the channel
codec->channel_position = current_position;
}
else if (IsLowPassBandMarker(marker))
{
int subband = 0;
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
}
break;
case CODEC_TAG_VERSION_MAJOR: // Version
assert(0);
break;
case CODEC_TAG_VERSION_MINOR: // Minor version number
assert(0);
break;
case CODEC_TAG_VERSION_REVISION: // Revision number
assert(0);
break;
case CODEC_TAG_VERSION_EDIT: // Edit number
assert(0);
break;
case CODEC_TAG_SEQUENCE_FLAGS: // Video sequence flags
assert(0);
break;
case CODEC_TAG_TRANSFORM_TYPE: // Type of transform
assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
{
int i;
codec->transform_type = value;
for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
{
TRANSFORM *transform = decoder->transform[i];
if(transform)
{
GetTransformPrescale(transform, codec->transform_type, codec->precision);
}
}
}
else
error = CODEC_ERROR_TRANSFORM_TYPE;
break;
case CODEC_TAG_NUM_FRAMES: // Number of frames in the group
assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
codec->num_frames = value;
else
error = CODEC_ERROR_NUM_FRAMES;
break;
case CODEC_TAG_NUM_CHANNELS: // Number of channels in the transform
assert(value <= CODEC_MAX_CHANNELS);
if (value <= CODEC_MAX_CHANNELS)
codec->num_channels = value;
else
error = CODEC_ERROR_NUM_CHANNELS;
break;
case CODEC_TAG_NUM_WAVELETS: // Number of wavelets in the transform
assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
codec->num_wavelets = value;
else
error = CODEC_ERROR_NUM_WAVELETS;
break;
case CODEC_TAG_NUM_SUBBANDS: // Number of encoded subbands
assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
codec->num_subbands = value;
else
error = CODEC_ERROR_NUM_SUBBANDS;
break;
case CODEC_TAG_NUM_SPATIAL: // Number of spatial levels
assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
codec->num_spatial = value;
else
error = CODEC_ERROR_NUM_SPATIAL;
break;
case CODEC_TAG_FIRST_WAVELET: // Type of the first wavelet
assert(value == TRANSFORM_FIRST_WAVELET);
if (value == TRANSFORM_FIRST_WAVELET)
codec->first_wavelet = value;
else
error = CODEC_ERROR_FIRST_WAVELET;
break;
case CODEC_TAG_CHANNEL_SIZE: // Number of bytes in each channel
assert(0);
break;
case CODEC_TAG_GROUP_TRAILER: // Group trailer and checksum
codec->sample_done = true;
break;
case CODEC_TAG_FRAME_TYPE: // Type of frame marks the frame start
codec->frame.type = value;
break;
case CODEC_TAG_FRAME_WIDTH: // Width of the frame
codec->frame.width = value;
break;
case CODEC_TAG_FRAME_HEIGHT: // Height of the frame
codec->frame.height = value;
//DAN20080729 -- Initialize the default colorspace based on clip resolution
if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
{
int internalheight = value;
int internalwidth = codec->frame.width;
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
internalwidth *= 2;
internalheight *= 2;
}
if(internalheight > 576 || internalwidth > 720)
decoder->frame.colorspace |= COLOR_SPACE_CG_709;
else
decoder->frame.colorspace |= COLOR_SPACE_CG_601;
}
//if(decoder->frame.colorspace_filedefault)
// decoder->frame.colorspace = decoder->frame.colorspace_filedefault;
if(decoder->frame.colorspace_override)
decoder->frame.colorspace = decoder->frame.colorspace_override;
break;
case CODEC_TAG_ENCODED_COLORSPACE: //DAN20080729
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
//there was a bug in 3.9.4 that had bayer flagged as 601.
if(decoder->frame.colorspace_override)
decoder->frame.colorspace = decoder->frame.colorspace_override;
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
{
decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
//Let the VSRGB status be controllable by the calling application (e.g. Vegas)
}
else
{
decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
}
}
decoder->frame.colorspace_filedefault = value;
break;
case CODEC_TAG_FRAME_FORMAT: // Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
assert(0);
break;
case CODEC_TAG_INPUT_FORMAT: // Format of the original pixels
codec->input_format = value;
// Set the encoded format if it has not already been set
// error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
break;
case CODEC_TAG_ENCODED_FORMAT: // Internal format of the encoded data
case CODEC_TAG_OLD_ENCODED_FORMAT:
codec->encoded_format = value;
if(codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
codec->encoded_format = ENCODED_FORMAT_RGB_444;
break;
case CODEC_TAG_FRAME_INDEX: // Position of frame within the group
codec->frame.group_index = value;
break;
case CODEC_TAG_FRAME_TRAILER: // Frame trailer and checksum
codec->sample_done = true;
break;
case CODEC_TAG_LOWPASS_SUBBAND: // Subband number of the lowpass band
codec->lowpass.subband = value;
error = SetDefaultEncodedFormat(codec);
break;
case CODEC_TAG_NUM_LEVELS: // Number of wavelet levels
codec->lowpass.level = value;
break;
case CODEC_TAG_LOWPASS_WIDTH: // Width of the lowpass band
codec->lowpass.width = value;
break;
case CODEC_TAG_LOWPASS_HEIGHT: // Height of the lowpass band
codec->lowpass.height = value;
break;
case CODEC_TAG_MARGIN_TOP: // Margins that define the encoded subset
codec->lowpass.margin.top = value;
break;
case CODEC_TAG_MARGIN_BOTTOM:
codec->lowpass.margin.bottom = value;
break;
case CODEC_TAG_MARGIN_LEFT:
codec->lowpass.margin.left = value;
break;
case CODEC_TAG_MARGIN_RIGHT:
codec->lowpass.margin.right = value;
break;
case CODEC_TAG_PIXEL_OFFSET: // Quantization parameters
codec->lowpass.pixel_offset = value;
break;
case CODEC_TAG_QUANTIZATION: // Quantization divisor used during encoding
codec->lowpass.quantization = value;
break;
case CODEC_TAG_PIXEL_DEPTH: // Number of bits per pixel
codec->lowpass.bits_per_pixel = value;
break;
case CODEC_TAG_LOWPASS_TRAILER: // Lowpass trailer
assert(0);
break;
case CODEC_TAG_WAVELET_TYPE: // Type of wavelet
codec->highpass.wavelet_type = value;
break;
case CODEC_TAG_WAVELET_NUMBER: // Number of the wavelet in the transform
codec->highpass.wavelet_number = value;
break;
case CODEC_TAG_WAVELET_LEVEL: // Level of the wavelet in the transform
codec->highpass.wavelet_level = value;
break;
case CODEC_TAG_NUM_BANDS: // Number of wavelet bands
codec->highpass.num_bands = value;
break;
case CODEC_TAG_HIGHPASS_WIDTH: // Width of each highpass band
codec->highpass.width = value;
break;
case CODEC_TAG_HIGHPASS_HEIGHT: // Height of each highpass band
codec->highpass.height = value;
break;
case CODEC_TAG_LOWPASS_BORDER: // Dimensions of lowpass border (obsolete)
codec->highpass.lowpass_border = value;
break;
case CODEC_TAG_HIGHPASS_BORDER: // Dimensions of highpass border (obsolete)
codec->highpass.highpass_border = value;
break;
case CODEC_TAG_LOWPASS_SCALE: // Scale factor for lowpass band
codec->highpass.lowpass_scale = value;
break;
case CODEC_TAG_LOWPASS_DIVISOR: // Divisor for the lowpass band
codec->highpass.lowpass_divisor = value;
break;
case CODEC_TAG_HIGHPASS_TRAILER: // Highpass trailer
assert(0);
break;
case CODEC_TAG_BAND_NUMBER: // Identifying number of a wavelet band
codec->band.number = value;
break;
case CODEC_TAG_BAND_WIDTH: // Band data width
codec->band.width = value;
break;
case CODEC_TAG_BAND_HEIGHT: // Band data height
codec->band.height = value;
break;
case CODEC_TAG_BAND_SUBBAND: // Subband number of this wavelet band
codec->band.subband = value;
//assert(value != 255);
break;
case CODEC_TAG_BAND_ENCODING: // Encoding method for this band
codec->band.encoding = value;
break;
case CODEC_TAG_BAND_QUANTIZATION: // Quantization applied to band
codec->band.quantization = value;
break;
case CODEC_TAG_BAND_SCALE: // Band scale factor
codec->band.scale = value;
break;
case CODEC_TAG_BAND_TRAILER: // Band trailer
assert(0);
break;
case CODEC_TAG_NUM_ZEROVALUES: // Number of zero values
assert(0);
break;
case CODEC_TAG_NUM_ZEROTREES: // Number of zerotrees
assert(0);
break;
case CODEC_TAG_NUM_POSITIVES: // Number of positive values
assert(0);
break;
case CODEC_TAG_NUM_NEGATIVES: // Number of negative values
assert(0);
break;
case CODEC_TAG_NUM_ZERONODES: // Number of zerotree nodes
assert(0);
break;
case CODEC_TAG_CHANNEL: // Channel number
assert(0);
break;
case CODEC_TAG_INTERLACED_FLAGS: // Interlaced structure of the video stream
//assert(0);
break;
//assert(0);
case CODEC_TAG_PROTECTION_FLAGS: // Copy protection bits
//assert(0);
break;
case CODEC_TAG_PICTURE_ASPECT_X: // Numerator of the picture aspect ratio
codec->picture_aspect_x = value;
//assert(0);
break;
case CODEC_TAG_PICTURE_ASPECT_Y: // Denominator of the picture aspect ratio
codec->picture_aspect_y = value;
//assert(0);
break;
case CODEC_TAG_SAMPLE_FLAGS: // Flag bits that control sample decoding
// Progressive versus interlaced decoding is specified by the sample flags
error = UpdateCodecFlags(codec, value);
break;
case CODEC_TAG_FRAME_NUMBER: // Sequence number of the frame in the bitstream
codec->frame_number = value;
break;
// This TAG is now supported as part of the universal decoder.
// Only Prospect HD builds can decode 10bit.
case CODEC_TAG_PRECISION: // Number of bits in the video source
codec->precision = value;
{
int i;
for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
{
TRANSFORM *transform = decoder->transform[i];
if(transform)
{
GetTransformPrescale(transform, codec->transform_type, codec->precision);
}
}
}
break;
case CODEC_TAG_PRESCALE_TABLE:
{
int i;
int prescale[TRANSFORM_MAX_WAVELETS] = {0};
for(i=0;i<TRANSFORM_MAX_WAVELETS;i++)
prescale[i] = value >> (14-i*2) & 0x3;
for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
{
TRANSFORM *transform = decoder->transform[i];
if(transform)
{
memcpy(transform->prescale, prescale, sizeof(prescale));
}
}
}
break;
case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP.
codec->version[0] = (value>>12) & 0xf;
codec->version[1] = (value>>8) & 0xf;
codec->version[2] = value & 0xff;
break;
case CODEC_TAG_QUALITY_L: //
codec->encode_quality &= 0xffff0000;
codec->encode_quality |= value;
break;
case CODEC_TAG_QUALITY_H: //
codec->encode_quality &= 0xffff;
codec->encode_quality |= value<<16;
break;
case CODEC_TAG_BAND_CODING_FLAGS:
codec->active_codebook = value & 0xf; // 0-15 valid code books
codec->difference_coding = (value>>4) & 1;
break;
// Peak table processing
case CODEC_TAG_PEAK_TABLE_OFFSET_L:
codec->peak_table.offset &= ~0xffff;
codec->peak_table.offset |= (value & 0xffff);
codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
codec->peak_table.level = 0; // reset for the next subband
break;
case CODEC_TAG_PEAK_TABLE_OFFSET_H:
codec->peak_table.offset &= 0xffff;
codec->peak_table.offset |= (value & 0xffff)<<16;
codec->peak_table.level = 0; // reset for the next subband
break;
case CODEC_TAG_PEAK_LEVEL:
codec->peak_table.level = value;
codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
break;
case CODEC_TAG_PEAK_TABLE:
//this is the chunk header, so we have peak data
codec->peak_table.level = 0; // reset for the next subband
//Just skip as the data was read ahead
chunksize = value;
chunksize &= 0xffff;
input->lpCurrentWord += chunksize*4;
input->nWordsUsed -= chunksize*4;
break;
#if (1 && DEBUG)
case CODEC_TAG_SAMPLE_END: // Marks the end of the sample (for debugging only)
assert(0);
break;
#endif
default: // Unknown tag
if(tag & 0x4000)
{
if(tag & 0x2000) // i.e. 0x6xxx = 24bit size.
{
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag&0xff)<<16);
}
else // 16bit size
{
chunksize = value;
chunksize &= 0xffff;
}
}
else if(tag & 0x2000) //24bit LONGs chunk size
{
optional = true; // Fixes a weird scenario where the size fields in SizeTagPop() have not
// updated the size and turned the tag to optional. TODO: WHY
chunksize = 0; // do not skip
// chunksize = value + ((tag & 0xff)<<16);
// do not skip an unknown but optional chunk
// These are only used to size subbands, but the data within should not be skipped
// unless
if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
{
optional = true;
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag&0xff)<<16);
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
decoder->uncompressed_size = chunksize*4;
decoder->sample_uncompressed = 1;
}
}
assert(optional);
if(!optional)
{
error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
}
else if(chunksize > 0) // skip this option chunk
{
input->lpCurrentWord += chunksize*4;
input->nWordsUsed -= chunksize*4;
}
break;
}
return error;
}
// Mark one band of the wavelet as both decoded (valid) and started.
// When the threaded decoder is active, the flag update is serialized with
// the entropy worker threads through the worker lock.
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
    assert(decoder != NULL);
    assert(wavelet != NULL);
    if (decoder != NULL && wavelet != NULL)
    {
        const uint32_t band_mask = BAND_VALID_MASK(band);
#if (1 && DEBUG)
        FILE *logfile = decoder->logfile;
#endif
#if _THREADED_DECODER
        // Only take the lock when worker threads actually exist
        bool need_lock = (decoder->entropy_worker_new.pool.thread_count != 0);
        if (need_lock)
            Lock(&decoder->entropy_worker_new.lock);
#endif
#if (0 && DEBUG)
        if (logfile) {
            fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n",
                wavelet->band_valid_flags, BAND_VALID_MASK(band));
        }
#endif
        // Update the wavelet band flags
        wavelet->band_valid_flags |= band_mask;
        wavelet->band_started_flags |= band_mask;
#if _THREADED_DECODER
        if (need_lock)
            Unlock(&decoder->entropy_worker_new.lock);
#endif
    }
}
// Mark one band of the wavelet as started (decoding in progress, not yet
// necessarily complete).
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
    assert(decoder != NULL);
    assert(wavelet != NULL);
    if (decoder == NULL || wavelet == NULL)
        return;

#if _DELAYED_THREAD_START==0
    // Serialize the flag update with the entropy worker threads
    if (decoder->entropy_worker_new.pool.thread_count)
        Lock(&decoder->entropy_worker_new.lock);
#endif
    wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _DELAYED_THREAD_START==0
    if (decoder->entropy_worker_new.pool.thread_count)
        Unlock(&decoder->entropy_worker_new.lock);
#endif
}
// Return true when every band of the indicated wavelet that is NOT produced
// by the threaded transform stage has been decoded.
//
// wavelet: the wavelet to check (may be NULL if not yet created)
// index: position of the wavelet within the transform
// transform_type: TRANSFORM_TYPE_FIELDPLUS or TRANSFORM_TYPE_SPATIAL
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
    uint32_t skip_band_mask;
    uint32_t required_band_mask;

    // Has this wavelet been created?
    if (wavelet == NULL)
    {
        // Too soon to wait for the wavelet bands to be decoded
        return false;
    }

    switch (transform_type)
    {
    case TRANSFORM_TYPE_FIELDPLUS:
        // The temporal wavelet is fully computed by earlier transforms in the queue
        if (index == 2)
        {
            assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
            assert(wavelet->num_bands == 2);
            return true;
        }
        // Wavelets at the end of a transform chain (3 and 5) need every band
        // decoded; otherwise the lowpass band is computed by transforms
        // earlier in the queue.
        skip_band_mask = (index == 3 || index == 5) ? 0 : BAND_VALID_MASK(0);
        break;

    case TRANSFORM_TYPE_SPATIAL:
        // The top of the pyramid (index 2) needs every band decoded; lower
        // levels get their lowpass band from earlier transforms in the queue.
        skip_band_mask = (index == 2) ? 0 : BAND_VALID_MASK(0);
        break;

    default:
        // Unknown type of transform
        assert(0);
        // Assume that the bands are not valid
        return false;
    }

    // All bands except those produced by the transform thread must be decoded
    required_band_mask = ((1 << wavelet->num_bands) - 1) & ~skip_band_mask;
    return (wavelet->band_valid_flags & required_band_mask) == required_band_mask;
}
// Append a request to invert the transform for the given channel and wavelet
// index onto the decoder's transform queue for the entropy worker threads.
// Only the parameters are recorded here; the wavelet itself may not exist yet.
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
CODEC_STATE *codec = &decoder->codec;
TRANSFORM *transform = decoder->transform[channel];
//IMAGE *wavelet = transform->wavelet[index];
// Capture the precision now so the worker uses the current codec state
int precision = codec->precision;
// The transform data structure must exist
assert(transform != NULL);
// The transform thread variables should have been created
{
int free_entry;
#if _DELAYED_THREAD_START==0
// Lock access to the transform queue
Lock(&decoder->entropy_worker_new.lock);
#endif
// Copy the transform parameters into the next queue entry
free_entry = decoder->transform_queue.free_entry;
assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
// NOTE(review): if the queue is full, the request is silently dropped in
// release builds (only the assert above fires in debug builds)
if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
{
assert(transform != NULL);
assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);
// Note: The wavelet may not exist when the transform is queued
decoder->transform_queue.queue[free_entry].transform = transform;
decoder->transform_queue.queue[free_entry].channel = channel;
decoder->transform_queue.queue[free_entry].index = index;
decoder->transform_queue.queue[free_entry].precision = precision;
decoder->transform_queue.queue[free_entry].done = 0;
// Update the transform request queue
decoder->transform_queue.free_entry++;
decoder->transform_queue.num_entries++;
#if (1 && DEBUG)
if (logfile) {
fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
}
#endif
}
#if _DELAYED_THREAD_START==0
Unlock(&decoder->entropy_worker_new.lock);
#endif
}
}
#if _THREADED_DECODER
// Block until the entropy worker pool has finished every queued transform,
// then reset the transform queue to its empty state. No-op when the pool
// has no threads.
void WaitForTransformThread(DECODER *decoder)
{
    if (decoder->entropy_worker_new.pool.thread_count == 0)
        return;

#if _DELAYED_THREAD_START
    // Start the workers whose launch was deferred until now
    ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
    ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);

    // Empty the transform request queue
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
}
#endif
#endif
#if _INTERLACED_WORKER_THREADS
// Run the inverse frame transform to YUV on the interlaced worker threads:
// post the job to the shared mailbox, prime the row semaphore, wake the
// workers, and block until all of them signal completion.
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
    uint8_t *output, int pitch, FRAME_INFO *info,
    int chroma_offset, int precision)
{
    int32_t previous_count;
    int thread;
    // There are half as many input rows as output rows
    int row_count = (((info->height + 7) / 8) * 8) / 2;
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;

    // Describe the job in the shared mailbox
    mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Hand out one semaphore count per row of work
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
    assert(previous_count == 0);

    // Wake up the worker threads
    for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++)
    {
        SetEvent(decoder->interlaced_worker.start_event[thread]);
    }

    // Wait for every worker thread to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Run the inverse frame transform to 16-bit rows on the interlaced worker
// threads: post the job to the shared mailbox, prime the row semaphore, wake
// the workers, and block until all of them signal completion.
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
    PIXEL16U *output, int pitch, FRAME_INFO *info,
    int chroma_offset, int precision)
{
    int32_t previous_count;
    int thread;
    // There are half as many input rows as output rows
    int row_count = (((info->height + 7) / 8) * 8) / 2;
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;

    // Describe the job in the shared mailbox
    mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = (uint8_t *)output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Hand out one semaphore count per row of work
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
    assert(previous_count == 0);

    // Wake up the worker threads
    for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++)
    {
        SetEvent(decoder->interlaced_worker.start_event[thread]);
    }

    // Wait for every worker thread to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}
// Worker thread procedure for the interlaced decoder.
// Each worker claims a unique thread index, then loops: on its start event it
// processes the job described in the shared mailbox and signals its done
// event; on the stop event (or an abandoned wait) it terminates.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
DECODER *decoder = (DECODER *)lpParam;
FILE *logfile = decoder->logfile; // NOTE(review): not used in this routine
struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
int thread_index;
HANDLE hObjects[2];
DWORD dwReturnValue;
// Pin the thread to the configured CPU set, if any
if(decoder->thread_cntrl.affinity)
{
HANDLE hCurrentThread = GetCurrentThread();
SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity);
}
// Set the handler for system exceptions
#ifdef _WINDOWS
SetDefaultExceptionHandler();
#endif
// Determine the index of this worker thread
if(decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
thread_index = decoder->interlaced_worker.thread_count++;
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
// The transform worker variables should have been created
assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
assert(decoder->interlaced_worker.row_semaphore != NULL);
assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
assert(decoder->interlaced_worker.stop_event != NULL);
if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
decoder->interlaced_worker.row_semaphore != NULL &&
decoder->interlaced_worker.done_event[thread_index] != NULL &&
decoder->interlaced_worker.stop_event != NULL)) {
return 1;
}
// Wait on this thread's start event or the shared stop event
hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
hObjects[1] = decoder->interlaced_worker.stop_event;
for (;;)
{
// Wait for the signal to begin processing a transform
dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE);
// Received a signal to begin inverse transform processing?
if (dwReturnValue == WAIT_OBJECT_0)
{
int type; // Type of inverse transform to perform
int frame_index; // Index of output frame to produce
int num_channels; // Number of channels in the transform array
uint8_t *output; // Output frame buffer
int pitch; // Output frame pitch
FRAME_INFO info; // Format of the output frame
int chroma_offset; // Offset for the output chroma
int precision; // Source pixel bit depth
// Lock access to the transform data
if(decoder->interlaced_worker.lock_init) {
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
// Get the processing parameters (copied locally so the mailbox can be reused)
type = data->type;
frame_index = data->frame;
num_channels = data->num_channels;
output = data->output;
pitch = data->pitch;
memcpy(&info, &data->info, sizeof(FRAME_INFO));
chroma_offset = data->chroma_offset;
precision = data->precision;
// Unlock access to the transform data
if(decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
// Select the type of inverse transform to perform
switch (type)
{
case THREAD_TRANSFORM_FRAME_YUV:
//TODO: more to new _THREADED model
TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
output, pitch, &info, chroma_offset, precision);
break;
case THREAD_TRANSFORM_FRAME_ROW16U:
//TODO: more to new _THREADED model
TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
(PIXEL16U *)output, pitch, &info, chroma_offset, precision);
break;
default:
// Unknown job type posted to the mailbox
assert(0);
break;
}
// Signal that this thread is done
SetEvent(decoder->interlaced_worker.done_event[thread_index]);
}
else
{
// Should have a condition that causes the thread to terminate
assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
break;
}
}
return 0;
}
#endif
// Compute the dimensions of the decoded frame for the specified output
// resolution by scaling the dimensions of the wavelet that will be used
// for reconstruction.
//
// transform_array: per-channel transforms (channel zero supplies the wavelet)
// num_channels, frame_index: used only by the debug-build validity asserts
// resolution: one of the DECODED_RESOLUTION_* codes
// decoded_width_out, decoded_height_out: optional outputs (either may be NULL)
//
// Fix: previously an unknown resolution (or a wavelet that had not been
// allocated yet) dereferenced a NULL pointer in release builds, since the
// assert compiles out; the dimensions are now reported as zero instead.
void GetDecodedFrameDimensions(TRANSFORM **transform_array,
int num_channels,
int frame_index,
int resolution,
int *decoded_width_out,
int *decoded_height_out)
{
    IMAGE *wavelet = NULL;
    int decoded_scale = 0;
    int decoded_width = 0;
    int decoded_height = 0;

    // Select the wavelet and scale factor for the decoding resolution
    switch(resolution)
    {
    case DECODED_RESOLUTION_FULL_DEBAYER:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_FULL:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_HALF_NODEBAYER:
    case DECODED_RESOLUTION_HALF:
#if DEBUG
        assert(AllLowpassBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_QUARTER:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[3];
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[5];
        // Is this an intra frame?
        if (wavelet == NULL) {
            wavelet = transform_array[0]->wavelet[2];
        }
        break;

    default:
        // Unknown decoding resolution
        assert(0);
        break;
    }

    // Compute the decoded frame dimensions
    assert(wavelet != NULL);
    if (wavelet != NULL) {
        decoded_width = decoded_scale * wavelet->width;
        decoded_height = decoded_scale * wavelet->height;
    }

    if (decoded_width_out) {
        *decoded_width_out = decoded_width;
    }
    if (decoded_height_out) {
        *decoded_height_out = decoded_height;
    }
}
// Reconstruct Bayer format to the requested output format
// Reconstruct an uncompressed (raw Bayer) sample into the requested output format.
//
// decoder: decoder state; holds the uncompressed chunk captured during parsing
// info: output frame description (format, dimensions, resolution)
// frame: index of the frame in the group (unused here; kept for the common signature)
// output_buffer, output_pitch: destination frame and its row pitch
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT when
// the output format cannot be produced from Bayer data, or
// CODEC_ERROR_MEMORY_ALLOC when a scratch buffer cannot be allocated.
//
// Fix: RGBFilterBufferSize now records the size that was actually allocated
// (previously frame_size*3 was stored even when frame_size*4 was allocated
// for alpha output, understating the buffer size).
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int format = info->format;
    int width = info->width;
    int height = info->height;

    // Assume the output format is unsupported until it is recognized below
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    switch (format)
    {
    // Formats produced by the threaded demosaic path below
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
    case DECODED_FORMAT_RG64: //DAN20101207 added not sure why they weren't here.
    case DECODED_FORMAT_WP13: //DAN20090120 ""
    case DECODED_FORMAT_W13A: //DAN20101207 ""
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
    case DECODED_FORMAT_YUYV: //?
    case DECODED_FORMAT_UYVY: //?
    case DECODED_FORMAT_R408:
    case DECODED_FORMAT_V408:
        error = CODEC_ERROR_OKAY;
        break;

    // Bayer passthrough formats are converted directly and return early
    case DECODED_FORMAT_BYR2:
    case DECODED_FORMAT_BYR4:
    {
        // Optionally apply the linear restore curve for BYR4 output
        unsigned short *curve = NULL;
        if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
        {
            curve = decoder->BYR4LinearRestore;
        }
        ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve);
    }
        // The uncompressed chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;

    case DECODED_FORMAT_BYR3:
        ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch);
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }
    if(error)
        return error;

    // Need to allocate a scratch buffer for decoding the Bayer frame?
    if (decoder->RawBayer16 == NULL)
    {
        // Four Bayer data samples at each 2x2 quad in the grid
        int pixel_size = 4 * sizeof(PIXEL16U);
        int frame_size;
        const size_t alignment = 16;
#if _ALLOCATOR
        ALLOCATOR *allocator = decoder->allocator;
#endif
        frame_size = width * height * pixel_size;
#if _ALLOCATOR
        decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
        decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
        assert(decoder->RawBayer16 != NULL);
        if (! (decoder->RawBayer16 != NULL)) {
            return CODEC_ERROR_MEMORY_ALLOC;
        }
        decoder->RawBayerSize = frame_size;

        if(decoder->RGBFilterBuffer16 == NULL)
        {
            // Three RGB planes, or four when an alpha channel will be output
            int size = frame_size*3;
            if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                size = frame_size*4;
#if _ALLOCATOR
            decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
            decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
            assert(decoder->RGBFilterBuffer16 != NULL);
            if (! (decoder->RGBFilterBuffer16 != NULL)) {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            // Record the size that was actually allocated (bug fix)
            decoder->RGBFilterBufferSize = size;
        }
    }

    // Unpack the chunk into RawBayer16, using RGBFilterBuffer16 as scratch space
    ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution);
    decoder->uncompressed_chunk = 0;
    decoder->uncompressed_size = 0;

#if _THREADED
    // Demosaic the raw Bayer data using the worker thread pool
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int inverted = false;
        uint8_t *output = output_buffer;
        int pitch = output_pitch;
#if _DELAY_THREAD_START
        if(decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                WorkerThreadProc,
                decoder);
        }
#endif
        // NOTE(review): the inverted format is stored only in this local;
        // the workers see the original info->format and the negated pitch —
        // confirm inversion is derived from the pitch sign downstream.
        if (format == DECODED_FORMAT_RGB24)
        {
            format = DECODED_FORMAT_RGB24_INVERTED;
            inverted = true;
        }
        else if (format == DECODED_FORMAT_RGB32)
        {
            format = DECODED_FORMAT_RGB32_INVERTED;
            inverted = true;
        }
        // Have the output location and pitch been inverted?
        if (inverted && pitch > 0) {
            int height = info->height;
            if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
                height *= 2;
            output += (height - 1) * pitch; // Start at the bottom row
            pitch = NEG(pitch); // Negate the pitch to go up
        }
        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
        mailbox->jobType = JOB_TYPE_OUTPUT;
        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
    }
#else
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif
    return error;
}
// Reconstruct uncompressed v210 YUV format to the requested output format
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int num_channels = codec->num_channels;
//int precision = codec->precision;
int format = info->format;
int width = info->width;
int height = info->height;
int resolution = info->resolution;
// Compute the number of bytes between each row of Bayer data
//int bayer_pitch = 2 * width * sizeof(PIXEL16U);
// Compute the pitch between pairs of rows of bayer data (one pair per image row)
//int raw_bayer_pitch = 2 * bayer_pitch;
//int chroma_offset = decoder->codec.chroma_offset;
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
{
int smallest_Stride = output_pitch;
int unc_Stride = decoder->uncompressed_size / height;
if(unc_Stride < smallest_Stride)
smallest_Stride = unc_Stride;
if(unc_Stride == output_pitch)
memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
else
{
int y;
uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
uint8_t *dst = (uint8_t *)output_buffer;
for(y=0; y<height; y++)
{
memcpy(dst, src, smallest_Stride);
src += unc_Stride;
dst += output_pitch;
}
}
decoder->uncompressed_chunk = 0;
decoder->uncompressed_size = 0;
return CODEC_ERROR_OKAY;
}
if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
{
int smallest_Stride = output_pitch;
int unc_Stride = decoder->uncompressed_size / height;
if(unc_Stride < smallest_Stride)
smallest_Stride = unc_Stride;
{
int y;
uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
uint8_t *dst = (uint8_t *)output_buffer;
for(y=0; y<height; y++)
{
uint32_t *input_ptr = (uint32_t *)src;
int pos = 0;
int column=0,length = width;
length -= length % 6; //DAN03252004 -- fix a memory overflow.
for (column=0; column < length; column += 6)
{
uint32_t yuv;
int y;
int u;
int v;
// Read the first word
yuv = *(input_ptr++);
u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
// Expand the pixels to sixteen bits
u <<= 6;
y <<= 6;
v <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(u)>>8;
// Read the second word
yuv = *(input_ptr++);
y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(v)>>8;
u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
u <<= 6;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(u)>>8;
// Read the third word
yuv = *(input_ptr++);
v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
v <<= 6;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(v)>>8;
u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
u <<= 6;
// Read the fourth word
yuv = *(input_ptr++);
y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(u)>>8;
v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
v <<= 6;
y <<= 6;
dst[pos++] = SATURATE_16U(y)>>8;
dst[pos++] = SATURATE_16U(v)>>8;
}
if(format == DECODED_FORMAT_UYVY)
{
for (column=0; column < pos; column += 2)
{
int t = dst[column];
dst[column] = dst[column+1];
dst[column+1] = t;
}
}
src += unc_Stride;
dst += output_pitch;
}
}
decoder->uncompressed_chunk = 0;
decoder->uncompressed_size = 0;
return CODEC_ERROR_OKAY;
}
{
// Expand YUV at the target resolution, and use the ActiveMetadata engine.
// Need to allocate a scratch buffer for decoding the frame?
if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
{
//int pixel_size = 2 * sizeof(PIXEL16U);
const size_t alignment = 16;
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
int orig_width = width;
if(resolution == DECODED_RESOLUTION_HALF)
orig_width *= 2;
if(resolution == DECODED_RESOLUTION_QUARTER)
orig_width *= 4;
if(decoder->RawBayer16)
{
#if _ALLOCATOR
FreeAligned(allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = 0;
#else
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = 0;
#endif
}
#if _ALLOCATOR
decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
assert(decoder->RawBayer16 != NULL);
if (! (decoder->RawBayer16 != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RawBayerSize = orig_width * 64;
}
}
// unpack source original YUV into YU64?
if(decoder->RawBayer16)
{
//uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
//uint8_t *dst = (uint8_t *)output_buffer;
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_buffer;
mailbox->pitch = output_pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
#else
{
int orig_width = width;
int orig_height = height;
int row,lines = 1;
int start,end;
if(resolution == DECODED_RESOLUTION_HALF)
{
orig_width *= 2;
orig_height *= 2;
lines = 2;
}
if(resolution == DECODED_RESOLUTION_QUARTER)
{
orig_width *= 4;
orig_height *= 4;
lines = 4;
}
start = 0;
end = height;
if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
{
start = height-1;
end = -1;
}
for (row = start; row != end; end > start ? row++ : row--)
{
int whitebitdepth = 16;
int flags = 0;
uint8_t *planar_output[3];
int planar_pitch[3];
ROI roi;
PIXEL16U *y_row_ptr;
PIXEL16U *u_row_ptr;
PIXEL16U *v_row_ptr;
PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
PIXEL16U *scanline2 = scanline + orig_width * 8;
unsigned short *sptr;
int i,unc_Stride = decoder->uncompressed_size / orig_height;
y_row_ptr = (PIXEL16U *)scanline;
u_row_ptr = y_row_ptr + orig_width;
v_row_ptr = u_row_ptr + orig_width/2;
for(i=0; i<lines; i++)
{
src = (uint8_t *)decoder->uncompressed_chunk;
src += row * unc_Stride;
// Repack the row of 10-bit pixels into 16-bit pixels
ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);
// Advance to the next rows in the input and output images
y_row_ptr += orig_width*2;
u_row_ptr = y_row_ptr + orig_width;
v_row_ptr = u_row_ptr + orig_width/2;
}
y_row_ptr = (PIXEL16U *)scanline;
u_row_ptr = y_row_ptr + width;
v_row_ptr = u_row_ptr + width/2;
if(lines == 2)
{
for(i=0; i<width*2;i++)
y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2;
}
else if(lines == 4)
{
for(i=0; i<width*2;i++)
y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2;
}
roi.width = width;
roi.height = 1;
planar_output[0] = (uint8_t *)y_row_ptr;
planar_output[1] = (uint8_t *)v_row_ptr;
planar_output[2] = (uint8_t *)u_row_ptr;
planar_pitch[0] = 0;
planar_pitch[1] = 0;
planar_pitch[2] = 0;
if(decoder->apply_color_active_metadata)
{
ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
(unsigned char *)scanline2, width, output_pitch,
COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace, &whitebitdepth, &flags);
sptr = scanline2;
sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
info->format, &whitebitdepth, &flags);
}
else
{
ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
(unsigned char *)scanline2, width, output_pitch,
COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags);
sptr = scanline2;
}
ConvertLinesToOutput(decoder, width, 1, row, sptr,
dst, output_pitch, format, whitebitdepth, flags);
dst += output_pitch;
}
}
#endif
}
error = CODEC_ERROR_OKAY;
return error;
}
// Reconstruct uncompressed DPX0 RGB format to the requested output format
// Reconstruct an uncompressed DPX0 RGB (big-endian 10-bit packed) source frame
// into the requested output format.
//
// Fast path: for full-resolution packed 10-bit RGB outputs without the active
// metadata decoder, the uncompressed chunk is repacked in place (if needed)
// and copied to the output row by row.
//
// Slow path: each output row is unpacked into a 16-bit scratch buffer
// (decoder->RawBayer16), optionally run through the ActiveMetadata engine,
// and converted to the output format one row at a time (threaded builds hand
// the whole frame to the worker pool instead).
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_MEMORY_ALLOC if the
// scratch buffer cannot be allocated.
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int format = info->format;
	int width = info->width;
	int height = info->height;
	int resolution = info->resolution;

	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	// Fast path: straight copy for packed 10-bit RGB outputs at full resolution
	if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) &&
		resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
	{
		// Copy whole rows, but never more bytes than the narrower stride
		int smallest_Stride = output_pitch;
		int unc_Stride = decoder->uncompressed_size / height;
		if(unc_Stride < smallest_Stride)
			smallest_Stride = unc_Stride;

		if(format != DECODED_FORMAT_DPX0)
		{
			// Repack the DPX0 data into the requested 10-bit variant in place
			int unc_Stride = decoder->uncompressed_size / height;
			ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format);
		}

		if(unc_Stride == output_pitch)
		{
			// Strides match: one bulk copy covers the whole frame
			memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
		}
		else
		{
			int y;
			uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
			uint8_t *dst = (uint8_t *)output_buffer;
			for(y=0; y<height; y++)
			{
				memcpy(dst, src, smallest_Stride);
				src += unc_Stride;
				dst += output_pitch;
			}
		}
		// The uncompressed chunk has been consumed
		decoder->uncompressed_chunk = 0;
		decoder->uncompressed_size = 0;
		return CODEC_ERROR_OKAY;
	}

	{
		// Expand RGB at the target resolution, and use the ActiveMetadata engine.
		// Need to allocate a scratch buffer for decoding the frame?
		if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
		{
			const size_t alignment = 16;
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
#endif
			// Scratch rows are sized for the source (pre-downsample) width
			int orig_width = width;
			if(resolution == DECODED_RESOLUTION_HALF)
				orig_width *= 2;
			if(resolution == DECODED_RESOLUTION_QUARTER)
				orig_width *= 4;

			// Release any undersized buffer before reallocating
			if(decoder->RawBayer16)
			{
#if _ALLOCATOR
				FreeAligned(allocator, decoder->RawBayer16);
#else
				MEMORY_ALIGNED_FREE(decoder->RawBayer16);
#endif
				decoder->RawBayer16 = NULL;
				decoder->RawBayerSize = 0;
			}
#if _ALLOCATOR
			decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
			decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
			assert(decoder->RawBayer16 != NULL);
			if (! (decoder->RawBayer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->RawBayerSize = orig_width * 64;
		}
	}

	// Unpack the source rows through the scratch buffer
	if(decoder->RawBayer16)
	{
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								WorkerThreadProc,
								decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_buffer;
			mailbox->pitch = output_pitch;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;
			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
#else
		{
			int orig_width = width;
			int orig_height = height;
			int row,lines = 1;
			int start,end;
			// FIX: dst was previously undeclared in this (non-threaded) path,
			// so non-threaded builds could not compile.
			uint8_t *dst = (uint8_t *)output_buffer;

			if(resolution == DECODED_RESOLUTION_HALF)
			{
				orig_width *= 2;
				orig_height *= 2;
				lines = 2;
			}
			if(resolution == DECODED_RESOLUTION_QUARTER)
			{
				orig_width *= 4;
				orig_height *= 4;
				lines = 4;
			}

			start = 0;
			end = height;
			if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit
			{
				// Inverted (bottom-up) formats iterate rows in reverse
				start = height-1;
				end = -1;
			}

			for (row = start; row != end; end > start ? row++ : row--)
			{
				// Source samples are expanded to 13-bit white point below
				int whitebitdepth = 13;
				int flags;
				PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
				PIXEL16U *scanline2 = scanline + orig_width * 8;
				unsigned short *sptr;
				int i,unc_Stride = decoder->uncompressed_size / orig_height;

				if(decoder->apply_color_active_metadata)
					flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR;
				else
					flags = 0;

				if(lines == 1)
				{
					// Full resolution: unpack one row of big-endian 10-bit RGB
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					uint16_t *rg48;	// RG48 (16-bit per component) input cursor
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2);
					rg48 = (uint16_t *)lptr;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8; j++)
								{
									ptr[j] = rg48[0] >> 3;
									ptr[j+8] = rg48[1] >> 3;
									ptr[j+16] = rg48[2] >> 3;
									rg48 += 3;
								}
							}
							else
							{
								for(j=0; j<8; j++)
								{
									// Unpack DPX0 word: 2 pad bits then R,G,B 10 bits each
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+8] = g;
									ptr[j+16] = b;
								}
							}
						}
						else
						{
							if(decoder->image_dev_only) // HACK, currently assuming RG48 input data.
							{
								for(j=0; j<8*3; j+=3)
								{
									ptr[j] = rg48[0] >> 3;
									ptr[j+1] = rg48[1] >> 3;
									ptr[j+2] = rg48[2] >> 3;
									rg48 += 3;
								}
							}
							else
							{
								for(j=0; j<8*3; j+=3)
								{
									val = SwapInt32(*lptr++);
									val >>= 2;
									b = (val & 0x3ff) << 3;
									val >>= 10;
									g = (val & 0x3ff) << 3;
									val >>= 10;
									r = (val & 0x3ff) << 3;
									ptr[j] = r;
									ptr[j+1] = g;
									ptr[j+2] = b;
								}
							}
						}
						ptr += 24;	// 8 pixels * 3 components
					}
				}
				else if(lines == 2)
				{
					// Half resolution: average each 2x2 block of source pixels.
					// FIX: the cast was (uint32_t), truncating the pointer on
					// 64-bit builds; it must be (uint32_t *).
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2) * lines;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						for(j=0; j<8; j++)
						{
							// Sum the 2x2 neighborhood (this row and the next)
							val = SwapInt32(lptr[0]);
							val >>= 2;
							b = (val & 0x3ff) << 3;
							val >>= 10;
							g = (val & 0x3ff) << 3;
							val >>= 10;
							r = (val & 0x3ff) << 3;
							val = SwapInt32(lptr[1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;
							val = SwapInt32(lptr[unc_Stride>>2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;
							val = SwapInt32(lptr[(unc_Stride>>2)+1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;
							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}
				else if(lines == 4)
				{
					// Quarter resolution: average four samples from a 4x4 block.
					// FIX: same pointer-truncating (uint32_t) cast as above.
					uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
					PIXEL16U *ptr = (PIXEL16U *)scanline;
					lptr += row * (unc_Stride>>2) * lines;
					for(i=0; i<width;i+=8)
					{
						int val,r,g,b;
						for(j=0; j<8; j++)
						{
							// Sample columns 0 and 2 of this row and the row two down
							val = SwapInt32(lptr[0]);
							val >>= 2;
							b = (val & 0x3ff) << 3;
							val >>= 10;
							g = (val & 0x3ff) << 3;
							val >>= 10;
							r = (val & 0x3ff) << 3;
							val = SwapInt32(lptr[2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;
							val = SwapInt32(lptr[unc_Stride>>1]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;
							val = SwapInt32(lptr[(unc_Stride>>1)+2]);
							val >>= 2;
							b += (val & 0x3ff) << 3;
							val >>= 10;
							g += (val & 0x3ff) << 3;
							val >>= 10;
							r += (val & 0x3ff) << 3;
							if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
							{
								ptr[j] = r>>2;
								ptr[j+8] = g>>2;
								ptr[j+16] = b>>2;
							}
							else
							{
								ptr[j*3] = r>>2;
								ptr[j*3+1] = g>>2;
								ptr[j*3+2] = b>>2;
							}
							lptr += lines;
						}
						ptr += 24;
					}
				}

				// Apply 3D LUT / color corrections if requested, then convert
				// the scanline to the output format
				sptr = scanline;
				if(decoder->apply_color_active_metadata)
					sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2,
						info->format, &whitebitdepth, &flags);
				ConvertLinesToOutput(decoder, width, 1, row, sptr,
					dst, output_pitch, format, whitebitdepth, flags);
				dst += output_pitch;
			}
		}
#endif
	}
	error = CODEC_ERROR_OKAY;
	return error;
}
// Reconstruct Bayer format to the requested output format
// Dispatch reconstruction of a Bayer-encoded frame to the handler for the
// decoded resolution requested in info->resolution.
//
// Currently only the demosaic (debayer) resolutions are implemented; every
// other resolution reports CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int resolution = info->resolution;

	switch (resolution)
	{
	case DECODED_RESOLUTION_FULL_DEBAYER:
	case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
		// Demosaic to the requested output format
		return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch);

	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_NODEBAYER:
	case DECODED_RESOLUTION_HALF:
	case DECODED_RESOLUTION_QUARTER:
	case DECODED_RESOLUTION_LOWPASS_ONLY:
		// Handlers for these resolutions are not implemented
		return CODEC_ERROR_UNSUPPORTED_FORMAT;

	default:
		// The decoded resolution is not supported by this routine
		assert(0);
		return CODEC_ERROR_UNSUPPORTED_FORMAT;
	}
}
// Reconstruct Bayer encoded data to full resolution
// Reconstruct Bayer encoded data to full resolution.
//
// Only the scratch-buffer allocation is live; the per-format conversions are
// stubbed out, so every output format currently returns
// CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int format = info->format;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}
		frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		// Allocate the RGB filter (sharpening) buffer alongside the Bayer buffer
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;
			// A fourth plane is needed when decoding RGBA with alpha output
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// FIX: record the actual allocation size; the previous code stored
			// frame_size*3 even when a fourth alpha plane had been allocated,
			// understating the tracked buffer size.
			decoder->RGBFilterBufferSize = size;
		}
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		// Conversion to RGB32 is not implemented yet
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	case DECODED_FORMAT_RGB24:
		// Conversion to RGB24 is not implemented yet
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}
	return error;
}
// Reconstruct Bayer encoded data and demosaic to full resolution
// Reconstruct Bayer encoded data and demosaic to full resolution.
//
// Inverts the spatial transform into rows of packed Bayer data in the
// RawBayer16 scratch buffer, then demosaics to the output buffer via the
// worker thread pool.  Only available in threaded builds; otherwise returns
// CODEC_ERROR_UNSUPPORTED_FORMAT.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int precision = codec->precision;
	int format = info->format;
	int width = info->width;
	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);
	int chroma_offset = decoder->codec.chroma_offset;

	// Reject output formats this routine cannot produce
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13: //DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}
	if(error)
		return error;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;
#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif
		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}
		frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		// Allocate the RGB filter (sharpening) buffer alongside the Bayer buffer
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;
			// A fourth plane is needed when decoding RGBA with alpha output
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// FIX: record the actual allocation size; the previous code stored
			// frame_size*3 even when a fourth alpha plane had been allocated,
			// understating the tracked buffer size.
			decoder->RGBFilterBufferSize = size;
		}
	}

#if _THREADED
	// Invert the spatial transform into rows of packed Bayer data
	// NOTE(review): the pitch is bayer_pitch*sizeof(PIXEL) — units look
	// inconsistent with the byte-sized bayer_pitch above; confirm against
	// TransformInverseSpatialUniversalThreadedToRow16u's contract.
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
								(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL),
								info, chroma_offset, precision);
	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;
#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							WorkerThreadProc,
							decoder);
		}
#endif
		// Bottom-up RGB formats are produced by inverting the output walk
		if (format == DECODED_FORMAT_RGB24)
		{
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32)
		{
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}
		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0) {
			int height = info->height;
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}
		// Post a message to the mailbox
		// NOTE(review): mailbox->info is copied from info, so the inverted
		// format computed above is not passed to the workers — confirm the
		// worker derives inversion from the negative pitch instead.
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;
		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif
	return error;
}
// Reconstruct Bayer encoded data to half resolution
// Reconstruct Bayer encoded data to half resolution.
//
// The lowpass band of each channel's frame wavelet already holds the
// half-resolution plane, so the planes are read directly from the transform
// and handed to the planar-Bayer converter for the requested format.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transforms = decoder->transform;
	int frame_width = info->width;
	int frame_height = info->height;
	int format = info->format;
	PIXEL16U *g1_plane, *rg_plane, *bg_plane, *g2_plane;
	int g1_pitch, rg_pitch, bg_pitch, g2_pitch;

	// Lowpass bands of the frame wavelets are the half-resolution planes
	g1_plane = (PIXEL16U *)transforms[0]->wavelet[frame]->band[0];
	g1_pitch = transforms[0]->wavelet[frame]->pitch;
	rg_plane = (PIXEL16U *)transforms[1]->wavelet[frame]->band[0];
	rg_pitch = transforms[1]->wavelet[frame]->pitch;
	bg_plane = (PIXEL16U *)transforms[2]->wavelet[frame]->band[0];
	bg_pitch = transforms[2]->wavelet[frame]->pitch;

	// The fourth channel may be absent: half res don't decode g1-g2 //HACK
	if(transforms[3]->wavelet[frame])
	{
		g2_plane = (PIXEL16U *)transforms[3]->wavelet[frame]->band[0];
		g2_pitch = transforms[3]->wavelet[frame]->pitch;
	}
	else
	{
		g2_plane = NULL;
		g2_pitch = 0;
	}

	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
								  bg_plane, bg_pitch, g2_plane, g2_pitch,
								  output_buffer, output_pitch,
								  frame_width, frame_height);
		return CODEC_ERROR_OKAY;

	default:
		// No converter for this output format
		return CODEC_ERROR_UNSUPPORTED_FORMAT;
	}
}
// Reconstruct Bayer encoded data to quarter resolution
// Reconstruct Bayer encoded data to quarter resolution.
//
// Not implemented yet: this stub asserts in debug builds and returns
// CODEC_ERROR_OKAY without writing anything to the output buffer.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//TODO: Need to finish this routine
	assert(0);
	return CODEC_ERROR_OKAY;
}
// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &decoder->frame;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
int precision = codec->precision;
TRANSFORM **transform_array = decoder->transform;
//int decoded_width = 0;
//int decoded_height = 0;
int resolution = info->resolution;
int format = info->format;
//int color_space = decoder->frame.colorspace;
//TODO: Eliminate use of the chroma offset
int chroma_offset = decoder->codec.chroma_offset;
#if _THREADED
// Type of threaded inverse transform
//int type;
#endif
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
if (decoder == NULL) {
return CODEC_ERROR_INVALID_ARGUMENT;
}
//TODO: Split this routine into subroutines for progressive versus interlaced video
//TODO: Split progressive and interlaced routines into subroutines for each resolution
if(resolution == DECODED_RESOLUTION_HALF)
{
bool inverted = false;
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
#endif
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->framenum = frame;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
return CODEC_ERROR_OKAY;
#endif
}
else
{
int precision = codec->precision;
TRANSFORM **transform_array = decoder->transform;
int channel;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, &info2, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
return CODEC_ERROR_OKAY;
}
// Was the video source interlaced or progressive?
if (progressive)
{
// The video source was progressive (the first transform was a spatial transform)
if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24) {
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32) {
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
#endif
/*if(decoder->use_active_metadata_decoder)
{
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
case DECODED_FORMAT_YUYV: // computing the active metadata.
case DECODED_FORMAT_UYVY:
return CODEC_ERROR_OKAY;
break;
}
}*/
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
else
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
return CODEC_ERROR_OKAY;
#endif
}
break;
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
else
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
return CODEC_ERROR_OKAY;
#endif
}
break;
//Handle sizes that are smaller than the interim decode buffer //DAN20081222
case DECODED_FORMAT_CbYCrY_10bit_2_8:
decoder->upper_plane = output;
decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;
// Use the address and pitch of the lower plane
output = decoder->lower_plane;
pitch = decoder->frame.width * 2;
// Fall through and compute the inverse spatial transform
case DECODED_FORMAT_CbYCrY_16bit_2_14:
case DECODED_FORMAT_CbYCrY_16bit_10_6:
case DECODED_FORMAT_CbYCrY_8bit:
case DECODED_FORMAT_CbYCrY_16bit:
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToOutput);
return CODEC_ERROR_OKAY;
}
break;
case DECODED_FORMAT_V210:
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalYUVStrip16sToYUVOutput);
return CODEC_ERROR_OKAY;
}
break;
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB32_INVERTED:
// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_R408:
case DECODED_FORMAT_V408:
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_W13A:
if((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
{
#if _THREADED
TransformInverseSpatialThreadedYUV422ToBuffer(decoder,
frame, num_channels, output, pitch,
&info2, chroma_offset, precision);
#elif 0
TransformInverseSpatialToBuffer(decoder, transform_array, frame,
num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#else
TransformInverseSpatialYUV422ToOutput(decoder, transform_array,
frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision,
InvertHorizontalStripYUV16sToPackedRGB32);
#endif
return CODEC_ERROR_OKAY;
}
#if _THREADED
if(decoder->use_active_metadata_decoder)
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
}
else
{
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame,
num_channels, output, pitch,
&info2, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
&info2, chroma_offset, precision);
return CODEC_ERROR_OKAY;
}
#endif
break;
default:
if(decoder->use_active_metadata_decoder)
{
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sThruActiveMetadata);
return CODEC_ERROR_OKAY;
#endif
}
// else Return the error code for unsupported output format
break;
}
}
}
else
{
// The video source was interlaced (the first transform was a frame transform)
if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
bool inverted = false;
if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) {
// info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active.
inverted = true;
}
#if 1
// Have the output location and pitch been inverted?
if (inverted && pitch > 0) {
int height = info->height;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
#endif
switch (format & 0x7ffffff)
{
case DECODED_FORMAT_NV12:
case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
case DECODED_FORMAT_V210: // only supported with use_active_metadata_decoder
if(decoder->use_active_metadata_decoder)
{
int frame_size = info->width * info->height * 4;
if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
{
#if _ALLOCATOR
if(decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
if(decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
}
decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
assert(decoder->RGBFilterBuffer16 != NULL);
if (! (decoder->RGBFilterBuffer16 != NULL)) {
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RGBFilterBufferSize = frame_size;
}
//TransformInverseSpatialUniversalThreadedToRow16u(
// decoder, frame, num_channels,
// (uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2,
// info, chroma_offset, precision);
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)decoder->RGBFilterBuffer16,
info->width * 4,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)decoder->RGBFilterBuffer16,
info->width * 4, info,
&decoder->scratch, chroma_offset, precision);
#endif
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if(decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 2; // yuv
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
return CODEC_ERROR_OKAY;
}
}
switch (format)
{
// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
case DECODED_FORMAT_WP13: //DAN20110203 - missing
case DECODED_FORMAT_W13A: //DAN20110203 - missing
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_RGB32: //32-bit format can fit the interim YR16 decode into
case DECODED_FORMAT_R408: //the output buffer
case DECODED_FORMAT_V408:
case DECODED_FORMAT_YU64:
case DECODED_FORMAT_YR16:
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
//Old code converts 4:2:2 directly to RGBA (single threaded.)
//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
// info, &decoder->scratch, chroma_offset, precision);
#endif
return CODEC_ERROR_OKAY;
default:
// else Return the error code for unsupported output format
break;
}
}
}
// The output format is not supported by this routine
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
return error;
}
// Routines for converting the new encoded formats to the requested output format
/*!
	@brief Reconstruct one progressive RGB 4:4:4 decoded frame into the caller's buffer.

	Dispatches on the decoded resolution: lowpass-only and half resolution copy the
	lowpass (DC) bands directly; quarter resolution uses ReconstructQuarterFrame;
	full resolution (and half-horizontal) runs the inverse spatial transform, either
	through the active-metadata path (via an interim 16-bit planar buffer and the
	worker-thread output pass) or straight to the requested pixel format.

	@param decoder  Decoder state; must not be NULL.
	@param frame    Frame index within the GOP (0 <= frame < gop_length).
	@param output   Destination buffer; NULL means decode without returning a frame.
	@param pitch    Destination row pitch in bytes; may be negated internally for
	                bottom-up RGB output.

	@return CODEC_ERROR_OKAY on success, otherwise an error code
	        (invalid argument, resolution, frame size, allocation, or format).
*/
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	FRAME_INFO *info;
	CODEC_STATE *codec;
	int num_channels;
	TRANSFORM **transform_array;
	int decoded_width = 0;
	int decoded_height = 0;
	int resolution;
#if _ALLOCATOR
	ALLOCATOR *allocator;
#endif
#if (1 && DEBUG)
	FILE *logfile;
#endif

	// Validate the decoder argument before touching any of its members.
	// (Fix: the original code dereferenced the decoder to initialize locals
	// before performing this NULL check, which made the check ineffective.)
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

#if (1 && DEBUG)
	logfile = decoder->logfile;
#endif
	info = &decoder->frame;
	codec = &decoder->codec;
	num_channels = codec->num_channels;
	transform_array = decoder->transform;
	resolution = info->resolution;
#if _ALLOCATOR
	allocator = decoder->allocator;
#endif

	//TODO: Eliminate use of the chroma offset

	// This routine should only be called for progressive frames
	assert(codec->progressive);

	// The decoder can decode a video sample without returning a frame
	if (output == NULL || pitch == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Does this frame have to be reconstructed?
	if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Check that the requested frame is within the limits of the group of frames
	assert(0 <= frame && frame < decoder->gop_length);

	// Check that the frame resolution is valid
	assert(IsValidFrameResolution(resolution));
	if (!IsValidFrameResolution(resolution)) {
		return CODEC_ERROR_RESOLUTION;
	}

	// Compute the decoded width and height
	ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
	assert(decoded_width > 0 && decoded_height > 0);

	// Top-down RGB formats are written bottom-up: start at the last row
	// and negate the pitch so the image comes out right side up
	if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
	{
		output += (info->height - 1) * pitch;
		pitch = -pitch;
	}

#if (0 && DEBUG)
	if (logfile) {
		// Fix: dead debug code referenced undeclared `transform`
		IMAGE *wavelet = transform_array[0]->wavelet[frame];
		int band = 0;
		fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
		DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
	}
#endif

	// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
	//if (! (info->width >= decoded_width))
	{
		if (logfile) {
			//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
			fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
		}
	}
#endif
	assert(info->width >= decoded_width);
	if (!(info->width >= decoded_width)) {
		return CODEC_ERROR_FRAMESIZE;
	}

	// assert((info->height+7)/8 >= (decoded_height+7)/8);
	// if (!(info->height+7)/8 >= (decoded_height+7)/8) {
	//	return CODEC_ERROR_FRAMESIZE;
	// }

	START(tk_convert);

	if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
	{
		// Copy the lowpass (DC) bands straight to the output buffer
		int scale = 13;
		int channel;
		IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
		int chroma_offset = decoder->codec.chroma_offset;

		//DAN20081203 -- fix for 444 decodes in AE32-bit float
		decoder->frame.white_point = 16;
		//decoder->frame.signed_pixels = 0;

		for (channel = 0; channel < num_channels; channel++)
		{
			lowpass_images[channel] = transform_array[channel]->wavelet[5];
			if (lowpass_images[channel] == NULL) // therefore IntraFrame compressed.
			{
				// Intraframe samples store the lowpass band in wavelet 2 at a different scale
				scale = 12;
				lowpass_images[channel] = transform_array[channel]->wavelet[2];
			}
		}

		CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
			scale, decoder->codec.encoded_format, decoder->frame.white_point);
	}
	else
	// Quarter resolution
	if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Output quarter resolution for the two frame GOP
		int precision = codec->precision;

		// Reconstruct the frame to quarter resolution
		ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
			info, &decoder->scratch, precision);

		// Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
	}
	else
	// Half resolution
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
		int precision = codec->precision;
		int chroma_offset = 0;
		int channel;

		if (decoder->use_active_metadata_decoder)
		{
#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
				if (decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);

					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
						decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						WorkerThreadProc,
						decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				mailbox->framenum = frame;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;
				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Get the first level wavelet in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				wavelet_array[channel] = transform_array[channel]->wavelet[frame];
			}

			// Pack the pixels from the lowpass band in each channel into the output buffer
			CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
				info, chroma_offset, precision);
		}
	}
	// Full resolution or half horizontal
	else
	{
		int chroma_offset = 0;
		int precision = codec->precision;

		// Reconstruct the output frame from a full resolution decode
		//assert(resolution == DECODED_RESOLUTION_FULL);

		if (decoder->use_active_metadata_decoder)
		{
			// Decode into an interim 16-bit planar buffer, then run the
			// active-metadata output pass through the worker threads
			int frame_size, channels = 3;

			if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				channels = 4;

			frame_size = info->width * info->height * channels * 2;

			// (Re)allocate the interim buffer if missing or too small
			if (decoder->RGBFilterBuffer16 == NULL || decoder->RGBFilterBufferSize < frame_size)
			{
#if _ALLOCATOR
				if (decoder->RGBFilterBuffer16)
				{
					FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
				if (decoder->RGBFilterBuffer16)
				{
					MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
				assert(decoder->RGBFilterBuffer16 != NULL);
				if (! (decoder->RGBFilterBuffer16 != NULL)) {
					return CODEC_ERROR_MEMORY_ALLOC;
				}
				decoder->RGBFilterBufferSize = frame_size;
			}

#if _THREADED
			TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
				(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
				info, chroma_offset, precision);
#else
			// Decode that last transform to rows of Bayer data (one row per channel)
			TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
				(uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
				info, &decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
				if (decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);

					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
						decoder->thread_cntrl.capabilities >> 16/*cpus*/,
						WorkerThreadProc,
						decoder);
				}
#endif
				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;
				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Decode straight to the requested output format
			switch (info->format)
			{
			case DECODED_FORMAT_B64A:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2B64A);
#else
				TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YU64: //TODO : Threading
				TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2RG30);
#else
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2YUV);
#else
				TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch,
					info, &decoder->scratch, chroma_offset, precision,
					InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
				break;

			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGBA2YUVA);
#else
				assert(0);
#endif
				break;

			case DECODED_FORMAT_YR16:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2YR16);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_V210:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2v210);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_CbYCrY_8bit: // DECODED_FORMAT_CT_UCHAR
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
					output, pitch,
					info, chroma_offset, precision,
					InvertHorizontalStrip16sRGB2YUV);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			//TODO: Add code to handle other Avid pixel formats
			case DECODED_FORMAT_CbYCrY_16bit:       // DECODED_FORMAT_CT_SHORT
			case DECODED_FORMAT_CbYCrY_10bit_2_8:   // DECODED_FORMAT_CT_10Bit_2_8
			case DECODED_FORMAT_CbYCrY_16bit_2_14:  // DECODED_FORMAT_CT_SHORT_2_14
			case DECODED_FORMAT_CbYCrY_16bit_10_6:  // DECODED_FORMAT_CT_USHORT_10_6
				assert(0);
				break;

			default:
#if (1 && DEBUG)
				if (logfile) {
					fprintf(logfile, "Invalid decoded format: %d\n", info->format);
				}
#endif
				assert(0);
				error = CODEC_ERROR_INVALID_FORMAT;
				break;
			}
		}
	}

	STOP(tk_convert);

	return error;
}
// Convert 16-bit signed lowpass data into the requested output format
//
// Packs the lowpass (DC) bands of the decoded wavelet channels (GRB channel
// order: image_array[0]=G, [1]=R, [2]=B, optional [3]=A) directly into the
// output buffer.  Used for half-resolution and lowpass-only reconstruction.
// Formats not handled by the explicit cases fall through to a generic path
// that interleaves the planar rows into a scanline buffer taken from the
// decoder scratch space and converts one row at a time.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset,
int precision)
{
bool inverted = false;
int output_width = info->width;
int output_height = info->height;
int format = info->format;
// Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
const int shift = 16 - precision - PRESCALE_LUMA;
START(tk_convert);
#if 0
// Fill the output buffer with blank values
EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif
// Determine the type of conversion
switch (info->format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
// Plain RGB24/RGB32 are stored bottom-up; mark inverted then fall
// through to the shared RGB conversion below
inverted = true;
/* fall through */
case DECODED_FORMAT_RGB24_INVERTED:
case DECODED_FORMAT_RGB32_INVERTED:
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_RG48:
case DECODED_FORMAT_RG64: //WIP
// One shared converter handles all RGB-family outputs
ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
output_pitch, format, inverted, shift, num_channels);
break;
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
{
// Channel order in the decoded transform is G, R, B
IMAGE *g_image = image_array[0];
IMAGE *r_image = image_array[1];
IMAGE *b_image = image_array[2];
// NOTE(review): this compares the DECODED_FORMAT_* field against
// COLOR_FORMAT_* constants -- presumably the numeric values match;
// confirm against the format enum definitions
if (info->format == COLOR_FORMAT_YUYV)
{
ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
r_image->pitch, g_image->pitch, b_image->pitch,
output_buffer, output_pitch,
output_width, output_height, 14,
info->colorspace, info->format);
}
else if (info->format == COLOR_FORMAT_UYVY)
{
ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
r_image->pitch, g_image->pitch, b_image->pitch,
output_buffer, output_pitch,
output_width, output_height, 14,
info->colorspace, info->format);
}
}
break;
default:
{
// Generic path: interleave the planar rows into a scanline buffer and
// convert one output row at a time through the active-metadata pipeline
int y;
IMAGE *g_image = image_array[0];
IMAGE *r_image = image_array[1];
IMAGE *b_image = image_array[2];
IMAGE *a_image = image_array[3];
// Scanline workspace is carved out of the decoder scratch area;
// assumes the free region is large enough for one interleaved row
// (3 or 4 planes of width*2 bytes) -- no bounds check here
unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
//unsigned short *scanline2 = scanline + output_width*3;
uint8_t *newline = (uint8_t *)output_buffer;
unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;
Rptr = (unsigned short *)r_image->band[0];
Gptr = (unsigned short *)g_image->band[0];
Bptr = (unsigned short *)b_image->band[0];
// Only include the alpha plane when the sample was encoded RGBA 4:4:4:4
// and the output format carries alpha
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
Aptr = (unsigned short *)a_image->band[0];
for(y=0; y<output_height; y++)
{
int flags = (ACTIVEMETADATA_PLANAR);
int whitebitdepth = 14;
// Pack R, G, B, A rows back-to-back into the scanline buffer
memcpy(scanline, Rptr, info->width*2);
memcpy(scanline+info->width, Gptr, info->width*2);
memcpy(scanline+info->width*2, Bptr, info->width*2);
memcpy(scanline+info->width*3, Aptr, info->width*2);
// Band pitches are in bytes; advance by one row of 16-bit pixels
Rptr += r_image->pitch/2;
Gptr += g_image->pitch/2;
Bptr += b_image->pitch/2;
Aptr += a_image->pitch/2;
Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
newline, output_pitch, info->format, whitebitdepth, flags);
newline += output_pitch;
}
}
else
{
for(y=0; y<output_height; y++)
{
int flags = (ACTIVEMETADATA_PLANAR);
int whitebitdepth = 14;
// Pack R, G, B rows back-to-back into the scanline buffer
memcpy(scanline, Rptr, info->width*2);
memcpy(scanline+info->width, Gptr, info->width*2);
memcpy(scanline+info->width*2, Bptr, info->width*2);
Rptr += r_image->pitch/2;
Gptr += g_image->pitch/2;
Bptr += b_image->pitch/2;
ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
newline, output_pitch, info->format, whitebitdepth, flags);
newline += output_pitch;
}
}
}
//assert(0);
break;
}
STOP(tk_convert);
}
#if _THREADED
// Threaded inverse transform using the new threads API
//
// Runs the inverse spatial transform for one frame through the worker
// thread pool, converting decoded YUV 4:2:2 rows to packed RGB32 output.
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels,
	uint8_t *output, int pitch, FRAME_INFO *info,
	int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//TODO: Add support for more output formats
	int format = DECODED_FORMAT_RGB32;

	// Only the middle rows are shared by the upper and lower spatial
	// transforms: half the frame height rounded up to a multiple of eight
	int rows_to_process = (((info->height + 7) / 8) * 8) / 2;

	// Work request block shared with the worker threads
	WORKER_THREAD_DATA *msg = &decoder->worker_thread.data;

	// Inverse horizontal filter that emits the requested output format
	HorizontalInverseFilterOutputProc filter_proc;

#if _DELAY_THREAD_START
	// Lazily create the transform worker thread pool on first use
	if (decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		ThreadPoolCreate(&decoder->worker_thread.pool,
			decoder->thread_cntrl.capabilities >> 16/*cpus*/,
			WorkerThreadProc,
			decoder);
	}
#endif

	// Select the inverse horizontal filter for the output format
	if (format == DECODED_FORMAT_RGB32)
	{
		filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
	}
	else
	{
		// No other formats are supported by this entry point yet
		assert(0);
		return;
	}

	// Fill in the work request for the worker threads
	msg->horizontal_filter_proc = filter_proc;
	msg->frame = frame_index;
	msg->num_channels = num_channels;
	msg->output = output;
	msg->pitch = pitch;
	memcpy(&msg->info, info, sizeof(FRAME_INFO));
	msg->chroma_offset = chroma_offset;
	msg->precision = precision;
	msg->jobType = JOB_TYPE_WAVELET;

	// Hand the rows to the pool and block until the transform completes
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, rows_to_process);
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "All worker threads signalled done\n");
	}
#endif
}
// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
//
// Runs the inverse spatial transform through the worker thread pool,
// writing the result as planar 16-bit rows (one plane per channel).
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
	uint8_t *output, int pitch, FRAME_INFO *info,
	int chroma_offset, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// Work request block shared with the worker threads
	WORKER_THREAD_DATA *msg = &decoder->worker_thread.data;

	// Only the middle rows are shared by the upper and lower spatial
	// transforms: half the frame height rounded up to a multiple of eight
	int rows_to_process = (((info->height + 7) / 8) * 8) / 2;

#if _DELAY_THREAD_START
	// Lazily create the transform worker thread pool on first use
	if (decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		ThreadPoolCreate(&decoder->worker_thread.pool,
			decoder->thread_cntrl.capabilities >> 16/*cpus*/,
			WorkerThreadProc,
			decoder);
	}
#endif

	// Fill in the work request; the planar 16-bit writer is the only
	// horizontal filter used by this entry point
	msg->horizontal_filter_proc = InvertHorizontalStrip16sToRow16uPlanar;
	msg->frame = frame_index;
	msg->num_channels = num_channels;
	msg->output = output;
	msg->pitch = pitch;
	memcpy(&msg->info, info, sizeof(FRAME_INFO));
	msg->chroma_offset = chroma_offset;
	msg->precision = precision;
	msg->jobType = JOB_TYPE_WAVELET;

	// Hand the rows to the pool and block until the transform completes
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, rows_to_process);
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
//
// Runs the inverse spatial transform through the worker thread pool using
// the caller-supplied inverse horizontal filter to produce the output rows.
void TransformInverseSpatialUniversalThreadedToOutput(
	DECODER *decoder, int frame_index, int num_channels,
	uint8_t *output, int pitch, FRAME_INFO *info,
	int chroma_offset, int precision,
	HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	// Work request block shared with the worker threads
	WORKER_THREAD_DATA *msg = &decoder->worker_thread.data;

	// Only the middle rows are shared by the upper and lower spatial
	// transforms: half the frame height rounded up to a multiple of eight
	int rows_to_process = (((info->height + 7) / 8) * 8) / 2;

#if _DELAY_THREAD_START
	// Lazily create the transform worker thread pool on first use
	if (decoder->worker_thread.pool.thread_count == 0)
	{
		CreateLock(&decoder->worker_thread.lock);
		ThreadPoolCreate(&decoder->worker_thread.pool,
			decoder->thread_cntrl.capabilities >> 16/*cpus*/,
			WorkerThreadProc,
			decoder);
	}
#endif

	// Fill in the work request using the caller's output filter
	msg->horizontal_filter_proc = horizontal_filter_proc;
	msg->frame = frame_index;
	msg->num_channels = num_channels;
	msg->output = output;
	msg->pitch = pitch;
	memcpy(&msg->info, info, sizeof(FRAME_INFO));
	msg->chroma_offset = chroma_offset;
	msg->precision = precision;
	msg->jobType = JOB_TYPE_WAVELET;

	// Hand the rows to the pool and block until the transform completes
	ThreadPoolSetWorkCount(&decoder->worker_thread.pool, rows_to_process);
	ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
	ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Routines for the worker threads that use the new threads API
/*
	Worker-thread routine that inverts the spatial wavelet for one horizontal
	section of the output frame and writes the result directly into the output
	buffer (new threads API).

	Each worker thread:
	  1. Carves a private slice out of the decoder scratch space.
	  2. Allocates two output rows per channel inside that slice.
	  3. Processes the top border row (top thread), the bottom border row
	     (bottom thread), and then pulls middle-row work items from the thread
	     pool until no work remains.

	Parameters:
	  decoder                - decoder state; supplies transforms, scratch space,
	                           and the worker thread pool.
	  thread_index           - index of this worker within the pool; selects the
	                           scratch slice and the border-row responsibilities.
	  frame_index            - which wavelet frame in each channel transform to invert.
	  num_channels           - number of color channels (must fit TRANSFORM_MAX_CHANNELS).
	  output_buffer/pitch    - destination frame and its row pitch in bytes.
	  info                   - output frame dimensions, pixel format, and colorspace.
	  chroma_offset          - unused here; kept for interface compatibility.
	  precision              - source pixel precision passed to the row inverters.
	  horizontal_filter_proc - required horizontal inverse filter applied per row.

	NOTE(review): last_row/last_display_row/odd_display_lines are only assigned
	inside the channel loop when channel == 0, so num_channels must be >= 1 or
	they are read uninitialized — presumably guaranteed by callers; confirm.
*/
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index,
	int frame_index, int num_channels,
	uint8_t *output_buffer, int output_pitch, FRAME_INFO *info,
	int chroma_offset, int precision,
	HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;
	// Per-channel pointers and pitches for the four wavelet bands
	PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
	PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
	PIXEL *highlow_band[CODEC_MAX_CHANNELS];
	PIXEL *highhigh_band[CODEC_MAX_CHANNELS];
	int lowlow_pitch[CODEC_MAX_CHANNELS];
	int lowhigh_pitch[CODEC_MAX_CHANNELS];
	int highlow_pitch[CODEC_MAX_CHANNELS];
	int highhigh_pitch[CODEC_MAX_CHANNELS];
	int channel_width[CODEC_MAX_CHANNELS];
	uint8_t *output_row_ptr;
	uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];
	int plane_pitch[TRANSFORM_MAX_CHANNELS];
	int output_width = info->width;
	int output_height = info->height;
	// Each wavelet row expands to two output rows, so the wavelet has half the height
	int half_height = output_height/2;
	int luma_band_width;		// NOTE(review): assigned below but never read
	ROI strip;
	char *bufptr;
	int last_row;				// height of the luma wavelet (rows of work)
	int last_display_row;		// rows actually displayed (handles odd heights)
	int last_line;
	int channel;
	int row;
	int odd_display_lines = 0;	// set when the display height is odd
	THREAD_ERROR error;
	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;
	//TODO: Replace uses of buffer variables with calls to the scratch space API
	// This version is for 16-bit pixels
	assert(sizeof(PIXEL) == 2);
	// Must have a valid inverse horizontal filter
	assert(horizontal_filter_proc != NULL);
	// Check for enough space in the local array allocations
	// assert(num_channels <= CODEC_NUM_CHANNELS);
	assert(num_channels <= TRANSFORM_MAX_CHANNELS);
	// Divide the buffer space between the four threads
	buffer_size /= decoder->worker_thread.pool.thread_count;	// used to assume max of 4
	buffer += buffer_size * thread_index;
	// Round the buffer pointer up to the next cache line
	buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
	bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);
	// Allocate buffer space for the output rows from each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the row width for this channel
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];
		int width = wavelet->width;
		int height = wavelet->height;
		//int pitch = wavelet->pitch;
		size_t channel_buffer_size;
		// Compute the width and pitch for the output rows stored in this buffer
		// (each wavelet row reconstructs two output rows at double width)
		int buffer_width = 2 * width;
		int buffer_height = 2;
		int buffer_pitch = ALIGN16(buffer_width);
		// Compute the total allocation for this channel
		channel_buffer_size = buffer_height * buffer_pitch;
		// Check that there is enough space available
		assert(channel_buffer_size <= buffer_size);
		// Allocate the buffer for this channel
		plane_array[channel] = (uint8_t *)bufptr;
		// Remember the pitch for rows in this channel
		plane_pitch[channel] = buffer_pitch;
		// Advance the buffer pointer past the allocated space for this channel
		bufptr += channel_buffer_size;
		// Reduce the amount of space remaining in the buffer
		buffer_size -= channel_buffer_size;
		// The dimensions of the output image are the same as the luma channel
		if (channel == 0)
		{
			strip.width = buffer_width;
			strip.height = buffer_height;
			last_row = height;
			//DAN20050606 Added to fix issue with non-div by 8 heihts.
			last_display_row = (info->height+1)/2;	// DAN20090215 -- fix for odd display lines.
			odd_display_lines = info->height & 1;
			// Remember the width of the wavelet bands for luma
			luma_band_width = width;
		}
		// Save the bands per channel for routines that process all channels at once
		lowlow_band[channel] = wavelet->band[0];
		lowhigh_band[channel] = wavelet->band[1];
		highlow_band[channel] = wavelet->band[2];
		highhigh_band[channel] = wavelet->band[3];
		lowlow_pitch[channel] = wavelet->pitch;
		lowhigh_pitch[channel] = wavelet->pitch;
		highlow_pitch[channel] = wavelet->pitch;
		highhigh_pitch[channel] = wavelet->pitch;
		// Remember the width of the wavelet for this channel
		channel_width[channel] = width;
	}
	// Use the remaining buffer space for intermediate results
	buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
	buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);
	// Determine the last middle row: when the full wavelet height is displayed,
	// the final row is handled by the bottom-border pass below instead
	if (last_row == last_display_row)
	{
		last_line = half_height - 1;
	}
	else
	{
		last_line = half_height;
	}
	if(odd_display_lines)
		last_line++;
	if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
	{
		// Process the first row
		row = 0;
		output_row_ptr = output_buffer;
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
		}
#endif
		// Process the first row using special border filters for the top row
		InvertSpatialTopRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
			lowhigh_band, lowhigh_pitch,
			highlow_band, highlow_pitch,
			highhigh_band, highhigh_pitch,
			output_row_ptr, output_pitch,
			output_width, info->format, info->colorspace,
			row, channel_width,
			(PIXEL *)buffer, buffer_size,
			precision,
			horizontal_filter_proc);
	}
	if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD || decoder->worker_thread.pool.thread_count == 1)
	{
		if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
		{
			int pitch = output_pitch;
			// Process the last row
			row = last_row - 1;
			// Stacked-anamorphic 3D packs two views per frame, so halve the pitch
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
					pitch >>= 1;
			// Begin filling the last output row with results
			output_row_ptr = output_buffer + row * 2 * pitch;
#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
			}
#endif
			// Process the last row using special border filters for the bottom row
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
					output_row_ptr -= output_pitch;
			InvertSpatialBottomRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
				lowhigh_band, lowhigh_pitch,
				highlow_band, highlow_pitch,
				highhigh_band, highhigh_pitch,
				output_row_ptr, output_pitch,
				output_width, info->format, info->colorspace,
				row, channel_width,
				(PIXEL *)buffer, buffer_size,
				precision, odd_display_lines,
				horizontal_filter_proc);
		}
	}
	// Loop until all of the middle rows have been processed
	for (;;)
	{
		int work_index;
		int row;
		// Wait for one row from each channel to process
		error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);
		// Is there another row to process?
		if (error == THREAD_ERROR_OKAY)
		{
			int pitch = output_pitch;
			// Compute the next row to process from the work index
			// (work index zero corresponds to wavelet row one; row zero is the
			// top border handled above)
			row = work_index + 1;
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
					pitch >>= 1;
			// Compute the output row corresponding to this row index
			output_row_ptr = output_buffer + row * 2 * pitch;
		}
		else
		{
			// No more work to do
			return;
		}
		// Is the row inside the top and bottom border?
		if (0 < row && row < last_line)
		{
			int outputlines = 2;
#if (0 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
			}
#endif
			// An odd display height means the final middle row emits only one line
			if(odd_display_lines && row==last_line-1)
			{
				outputlines = 1;
			}
			// Process the middle row using the normal wavelet filters
			InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
				lowlow_band, lowlow_pitch,
				lowhigh_band, lowhigh_pitch,
				highlow_band, highlow_pitch,
				highhigh_band, highhigh_pitch,
				output_row_ptr, output_pitch,
				output_width, info->format, info->colorspace,
				row, channel_width,
				(PIXEL *)buffer, buffer_size,
				precision,
				horizontal_filter_proc,
				outputlines);
		}
	}
}
#endif //_THREADED
/*!
	Scan a bitstream of tag/value pairs for the first occurrence of findtag
	and return its value through retvalue.

	Sized chunks (tags with the 0x2000/0x4000 class bits, or CODEC_TAG_INDEX)
	are skipped over in units of 32-bit words; scanning stops at a group or
	frame trailer, when the buffer is exhausted, or on an unknown sized tag.

	Returns true when the tag was found, false otherwise.
*/
bool GetTuplet(unsigned char *data, int datasize,
			   unsigned short findtag, unsigned short *retvalue)
{
	bool ret = false;
	BITSTREAM myinput, *pinput;
	TAGVALUE segment;
	TAGWORD tag,value;
	int error = 0;

	// Reject empty input up front (consistent with GetTupletAddr)
	if (data == NULL || datasize == 0) {
		return false;
	}

	InitBitstream(&myinput);
	myinput.lpCurrentWord = data;
	myinput.nWordsUsed = datasize;
	pinput = &myinput;

	do
	{
		int chunksize = 0;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(pinput);
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Optional tags are encoded negated; normalize before decoding
		if (tag < 0)
		{
			tag = NEG(tag);
		}

		// Decode the chunk payload size (in 32-bit words) from the tag class
		if(tag & 0x2000)
		{
			// Large chunk: low byte of the tag carries the high bits of the size
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);
		}
		else if(tag & 0x4000)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else if(tag == CODEC_TAG_INDEX)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else
		{
			chunksize = 0;
		}

		if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
		{
			int skip = 1;
			error = 0;

			if(tag == (int)findtag)
			{
				*retvalue = value;
				ret = true;
				break;
			}

			if((tag & 0xff00) == 0x2200) //sample size
			{
				chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
				skip = 0;
			}
			if((tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				skip = 1;
			}
			if((tag & 0xff00) == 0x2100) //level
				skip = 0;

			if(chunksize)
			{
				// Stop if the chunk would run past the end of the buffer
				if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
				{
					break;
				}
				if(skip)
				{
					// Skip the chunk payload without parsing it
					pinput->lpCurrentWord += chunksize*4;
					pinput->nWordsUsed -= chunksize*4;
				}
			}
		}
		else
		{
			// Unknown tag without a size: cannot continue scanning safely
			error = 1;
		}
	} while(tag != CODEC_TAG_GROUP_TRAILER &&
			tag != CODEC_TAG_FRAME_TRAILER &&
			pinput->nWordsUsed>0 && !error);

	return ret;
}
/*!
	Copied from metadata.cpp in the cedoc common directory.

	Variant of GetTuplet that additionally returns the address of the payload
	that follows the matching tag/value pair (the bitstream position at the
	time of the match). Returns NULL when the tag is not found or the input
	is empty. The tag's value is stored through retvalue on success.
*/
uint8_t *GetTupletAddr(uint8_t *data,
					   int datasize,
					   uint16_t findtag,
					   int16_t *retvalue)
{
	unsigned char *ret = NULL;
	BITSTREAM myinput, *pinput;
	TAGVALUE segment;
	TAGWORD tag,value;
	int error = 0;

	if (data == NULL || datasize == 0) {
		return NULL;
	}

	// Minimal bitstream initialization (avoids the full InitBitstream setup)
	memset(&myinput, 0, sizeof(BITSTREAM));
	myinput.lpCurrentWord = data;
	myinput.nWordsUsed = datasize;
	myinput.nBitsFree = BITSTREAM_LONG_SIZE;
	pinput = &myinput;

	do
	{
		int chunksize = 0;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(pinput);
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Optional tags are encoded negated; normalize before decoding
		if (tag < 0)
		{
			tag = NEG(tag);
		}

		// Decode the chunk payload size (in 32-bit words) from the tag class
		if(tag & 0x2000)
		{
			// Large chunk: low byte of the tag carries the high bits of the size
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);
		}
		else if(tag & 0x4000)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else if(tag == CODEC_TAG_INDEX)
		{
			chunksize = value;
			chunksize &= 0xffff;
		}
		else
		{
			chunksize = 0;
		}

		if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
		{
			int skip = 1;
			error = 0;

			if(tag == (int)findtag)
			{
				// Found: return the current read position (payload address)
				*retvalue = value;
				ret = pinput->lpCurrentWord;
				break;
			}

			if((tag & 0xff00) == 0x2200) //sample size
			{
				chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
				skip = 0;
			}
			if((tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				skip = 1;
			}
			if((tag & 0xff00) == 0x2100) //level
				skip = 0;

			if(chunksize)
			{
				// Stop if the chunk would run past the end of the buffer
				if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
				{
					break;
				}
				if(skip)
				{
					// Skip the chunk payload without parsing it
					pinput->lpCurrentWord += chunksize*4;
					pinput->nWordsUsed -= chunksize*4;
				}
			}
		}
		else
		{
			// Unknown tag without a size: cannot continue scanning safely
			error = 1;
		}
	} while(tag != CODEC_TAG_GROUP_TRAILER &&
			tag != CODEC_TAG_FRAME_TRAILER &&
			pinput->nWordsUsed>0 && !error);

	return ret;
}
|
quantized_conv2d.h | /* Copyright 2018 The Blueoil Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
#define DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
#include <vector>
#include <memory>
#include <stdexcept>
#include "tensor_view.h"
#include "tensor_convert.h"
#include "operators.h"
#include "time_measurement.h"
#include "func/impl/quantized_conv2d_tiling.h"
#include "func/impl/quantized_conv2d_kn2row.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/// Dispatch a quantized 2D convolution to the build-appropriate backend.
///
/// Only 3x3/padding-1 and 1x1/padding-0 kernels are supported; anything else
/// throws std::invalid_argument. The input tensor is first converted into the
/// packed layout expected by the selected backend (FPGA TCA, NEON/AVX tiling,
/// or the generic kn2row path), then the backend is invoked.
///
/// NOTE(review): p is taken by value, so the `new BIN_CONV_OUTPUT[size]()`
/// assigned to p.device_output_buf below is stored only in the local copy —
/// callers that passed a null buffer never see (or free) it. Presumably
/// callers always pre-allocate the buffer; confirm before relying on the
/// in-function allocation.
template <typename T, MemoryLayout layout>
void QuantizedConv2D(const TensorView<T, layout>& input,
    const kernel_t& kernel,
    binary_convolution_parameters p) {
  Measurement::Start("QuantizedConv2D");
  constexpr T_UINT TilingInTypeBitWidth = dlk::impl::tiling_input_elem_t::BitCount;
  T_UINT kh = p.normal_conv_params.kernel_height;
  T_UINT kw = p.normal_conv_params.kernel_width;
  T_UINT padding = p.normal_conv_params.padding;
  T_UINT ih = p.normal_conv_params.input_height;
  T_UINT iw = p.normal_conv_params.input_width;
  T_UINT ic = p.normal_conv_params.kernel_depth;
  T_UINT oc = p.normal_conv_params.output_channels;
  // Scratch output sized from the input spatial dims (valid for the two
  // supported kernel configurations, where output dims equal input dims)
  auto size = oc * ih * iw;
  if (p.device_output_buf == nullptr)
    p.device_output_buf = new BIN_CONV_OUTPUT[size]();
  if ((kh == 3 && kw == 3 && padding == 1) ||
      (kh == 1 && kw == 1 && padding == 0)) {
#ifdef RUN_ON_FPGA
    // FPGA path: channels packed into QUANTIZED_PACKED words (rounded up)
    dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
      (ic + QUANTIZED_PACKED::BitCount - 1) / QUANTIZED_PACKED::BitCount,
      ih,
      iw,
      p.bin_input_bitwidth,
      QUANTIZED_PACKED::BitCount
    };
    dlk::impl::kn2row_input_t tmp(p.device_input_buf, shape);
    Measurement::Start("Tensor convert");
    convert_tensor(input, tmp);
    Measurement::Stop();
    dlk::impl::TCAConv2d(tmp, kernel, p);
#elif defined USE_NEON || defined USE_AVX
    // SIMD path: tiling layout, channels assumed divisible by the tile width
    dlk::impl::tiling_input_t::tensor_info_t<std::size_t> shape = {
      ic / TilingInTypeBitWidth,
      ih,
      iw,
      p.bin_input_bitwidth,
      TilingInTypeBitWidth
    };
    dlk::impl::tiling_input_t tmp(p.device_input_buf, shape);
    Measurement::Start("Tensor convert");
    convert_tensor(input, tmp);
    Measurement::Stop();
    dlk::impl::QuantizedConv2DTiling(tmp, kernel, p);
#else
    // Generic path: kn2row layout with HWC-major ordering
    dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
      ih,
      iw,
      ic / QUANTIZED_PACKED::BitCount,
      p.bin_input_bitwidth,
      QUANTIZED_PACKED::BitCount
    };
    dlk::impl::kn2row_input_t tmp(p.device_input_buf, shape);
    Measurement::Start("Tensor convert");
    convert_tensor(input, tmp);
    Measurement::Stop();
    dlk::impl::QuantizedConv2DKn2Row(tmp, kernel, p);
#endif
  } else {
    throw std::invalid_argument("Unsupported convolution parameter");
  }
  Measurement::Stop();
}
template <typename T, MemoryLayout layout>
void func_QuantizedConv2D(
const TensorView<T, layout>& input,
const kernel_t& kernel,
const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
const T_FLOAT scaling_factor,
const binary_convolution_parameters& p) {
QuantizedConv2D(input, kernel, p);
Measurement::Start("QuantizedConv2D_ApplyScalingFactor");
unsigned out_elems = p.normal_conv_params.output_height *
p.normal_conv_params.output_width *
p.normal_conv_params.output_channels;
// temporary: (2^n - 1) * (max - min)
const T_FLOAT post_qtz_factor = 2.0f / 3.0f;
int b = 32;
auto &ncp(p.normal_conv_params);
auto true_out_channels = output.get_shape()[3];
auto channel_blocks = (true_out_channels + b - 1) / b;
int out_index = 0;
for (int h = 0; h < ncp.output_height; ++h)
for (int w = 0; w < ncp.output_width; ++w)
for (int s = 0; s < channel_blocks; ++s)
for (int d = 0; d < std::min(b, (int)true_out_channels - s*b); ++d)
output.data()[out_index++] = (scaling_factor * post_qtz_factor) * p.device_output_buf[h * (b * ncp.output_width) + w * b + s * (ncp.output_height * ncp.output_width * b) + d];
Measurement::Stop();
}
/// Run the quantized convolution and convert its packed accumulator buffer to
/// floats in the NHWC output view, applying a per-channel scaling factor.
///
/// Same buffer layout as the scalar-factor overload (channel blocks of
/// b = 32); the factor for output channel c is scaling_factor[c].
template <typename T, MemoryLayout layout>
void func_QuantizedConv2D(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  QuantizedConv2D(input, kernel, p);
  int b = 32;
  auto& ncp(p.normal_conv_params);
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = (true_out_channels + b - 1) / b;
  // temporary: (2^n - 1) * (max - min)
  T_FLOAT post_qtz_factor = 2.0 / 3.0;
  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");
  int out_index = 0;
  for (int h = 0; h < ncp.output_height; ++h)
    for (int w = 0; w < ncp.output_width; ++w)
      for (int s = 0; s < channel_blocks; ++s)
        // The last block may be partially filled with true channels
        for (int d = 0; d < std::min(b, (int)true_out_channels - s*b); ++d)
          output.data()[out_index++] = (scaling_factor[s*b + d] * post_qtz_factor) * p.device_output_buf[h * (b * ncp.output_width) + w * b + s * (ncp.output_height * ncp.output_width * b) + d];
  Measurement::Stop();
}
/// Run the quantized convolution with in-backend thresholding and copy the
/// already-packed result straight into the packed output view.
///
/// The convolution backend leaves QUANTIZED_PACKED words in
/// p.device_output_buf; this overload only needs a bulk copy of
/// out_elems / 8 * n_bit bytes. scaling_factor is unused here because the
/// thresholds already encode the quantization.
template<typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);
  unsigned out_elems = p.normal_conv_params.output_height *
    p.normal_conv_params.output_width *
    p.normal_conv_params.output_channels;
  const auto bytes = out_elems / 8 * p.n_bit;
  Measurement::Start("Memcpy");
#ifdef _OPENMP
  // Split the copy into one contiguous chunk per thread
  const int num_blocks = bytes / sizeof(QUANTIZED_PACKED);
  const int num_threads = omp_get_max_threads();
  const int chunk_size = (num_blocks + num_threads - 1) / num_threads;
#pragma omp parallel for
  for (int i = 0; i < num_blocks; i += chunk_size) {
    memcpy(output.data() + i,
           (QUANTIZED_PACKED*)(p.device_output_buf) + i,
           // The final chunk may be shorter than chunk_size
           std::min(chunk_size, num_blocks - i) * sizeof(QUANTIZED_PACKED));
  }
#else
  memcpy(output.data(), (void*)p.device_output_buf, bytes);
#endif
  Measurement::Stop();
}
/// Run the quantized convolution with thresholding and unpack the bit-plane
/// result into a float NHWC output view.
///
/// The device buffer holds n_bit bit-planes per spatial position: word
/// (r * W + c) * n_bit + digit contains bit `digit` of every channel, one
/// channel per bit position. For each true output channel d the n_bit bits
/// are reassembled into an integer, normalized by (2^n_bit - 1), and scaled
/// by p.max_value. scaling_factor is unused (thresholds already encode it).
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);
  Measurement::Start("linear_to_float");
  // Maximum representable quantized value: 2^n_bit - 1
  T_FLOAT n = (1 << p.n_bit) - 1;
  const auto& np = p.normal_conv_params;
  const auto out_height = np.output_height;
  const auto out_width = np.output_width;
  const auto out_channels = np.output_channels;
  const auto true_out_channels = output.get_shape()[3];
  QUANTIZED_PACKED::base_t* ptr = (QUANTIZED_PACKED::base_t*)p.device_output_buf;
  for (unsigned r = 0; r < out_height; ++r) {
    for (unsigned c = 0; c < out_width; ++c) {
      for (unsigned d = 0; d < true_out_channels; ++d) {
        // Index of the first bit-plane word for this spatial position
        const auto i = r * out_width * p.n_bit + c * p.n_bit;
        QUANTIZED_PACKED::base_t bits = 0;
        // Gather channel d's bit from each of the n_bit planes
        for (unsigned digit = 0; digit < p.n_bit; ++digit) {
          bits |= ((ptr[i + digit] >> d) & 1) << digit;
        }
        T_FLOAT tmp = (T_FLOAT)bits;
        tmp = tmp / n;
        output(0, r, c, d) = tmp * p.max_value;
      }
    }
  }
  Measurement::Stop();
}
/// Per-channel-factor overload for the packed-output thresholded convolution.
/// Forwards to the scalar overload using only scaling_factor[0]; the factor
/// is unused on the thresholded path, so this loses no information.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor[],
    const binary_convolution_parameters& p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0],
                                    p);
}
/// Per-channel-factor overload for the float-output thresholded convolution.
/// Forwards to the scalar overload using only scaling_factor[0]; the factor
/// is unused on the thresholded path, so this loses no information.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0],
                                    p);
}
#endif // DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread pixel rows allocated by AcquirePixelThreadSet and
  then the row-pointer array itself.  Always returns NULL so callers can
  write `pixels=DestroyPixelThreadSet(images,pixels);`.
*/
static MagickPixelPacket **DestroyPixelThreadSet(const Image *images,
  MagickPixelPacket **pixels)
{
  size_t
    number_rows;

  ssize_t
    row;

  assert(pixels != (MagickPixelPacket **) NULL);
  number_rows=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  for (row=0; row < (ssize_t) number_rows; row++)
    if (pixels[row] != (MagickPixelPacket *) NULL)
      pixels[row]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[row]);
  pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static MagickPixelPacket **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
MagickPixelPacket
**pixels;
ssize_t
i,
j;
size_t
columns,
rows;
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(MagickPixelPacket **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (MagickPixelPacket **) NULL)
return((MagickPixelPacket **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=GetImageListLength(images);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns,
sizeof(**pixels));
if (pixels[i] == (MagickPixelPacket *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
GetMagickPixelPacket(images,&pixels[i][j]);
}
return(pixels);
}
/* Return the larger of two doubles. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering MagickPixelPacket entries by DECREASING
  intensity (color_2 minus color_1).  Used by the median evaluate operator
  to locate the middle pixel of the sorted list.

  NOTE(review): the double intensities are truncated to int before the
  subtraction, so fractional differences compare as equal — presumably
  acceptable for median selection; confirm for HDRI builds.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const MagickPixelPacket
    *color_1,
    *color_2;

  int
    intensity;

  color_1=(const MagickPixelPacket *) x;
  color_2=(const MagickPixelPacket *) y;
  intensity=(int) MagickPixelIntensity(color_2)-(int)
    MagickPixelIntensity(color_1);
  return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply a single evaluate operator to one quantum sample and return the
  (unclamped) real-valued result.

  Arguments:
    random_info - per-thread random state for the noise operators.
    pixel       - the input quantum sample.
    op          - the evaluate operator to apply.
    value       - the user-supplied operand.

  Mean/Median/Sum return the plain accumulation (pixel+value); the caller
  performs the averaging or median selection over the image list.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
  const Quantum pixel,const MagickEvaluateOperator op,
  const MagickRealType value)
{
  MagickRealType
    result;

  ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(MagickRealType) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a
        positive result. It differs from % or fmod() which returns a
        'truncated modulus' result, where floor() is replaced by trunc()
        and could return a negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value is rounded to the nearest integer before the bitwise op */
      result=(MagickRealType) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* guard against division by zero by dividing by 1.0 instead */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
        pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        ImpulseNoise,value);
      break;
    }
    case InverseLogEvaluateOperator:
    {
      result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
        PerceptibleReciprocal(value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* shift by repeated doubling; avoids integer-shift overflow */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* near-zero pixels return the default 0.0 result */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
          pixel+1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(MagickRealType) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulation only; the caller divides by the image count */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* accumulation only; the caller sorts and picks the middle pixel */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(MagickRealType) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(MagickRealType) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(MagickRealType) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* preserve the sign for negative (HDRI) samples */
      if (pixel < 0)
        result=(MagickRealType) -(QuantumRange*pow((double) -(QuantumScale*
          pixel),(double) value));
      else
        result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      /* shift by repeated halving; mirrors LeftShiftEvaluateOperator */
      result=(MagickRealType) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulate the sum of squares; the caller applies sqrt */
      result=((MagickRealType) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(MagickRealType) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
        QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
        pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(MagickRealType) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Clone a canvas large enough for every image in the list: its dimensions
  are the maximum columns/rows over the list, and it is cloned from the
  image with the most channels (RGB plus optional alpha and CMYK black)
  so no channel information is lost.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *canvas,
    *next;

  size_t
    height,
    max_channels,
    width;

  canvas=images;
  width=images->columns;
  height=images->rows;
  max_channels=0;
  for (next=images; next != (Image *) NULL; next=next->next)
  {
    size_t
      count;

    count=3;
    if (next->matte != MagickFalse)
      count++;
    if (next->colorspace == CMYKColorspace)
      count++;
    if (count > max_channels)
      {
        max_channels=count;
        canvas=next;
      }
    if (next->columns > width)
      width=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  return(CloneImage(canvas,width,height,MagickTrue,exception));
}
/*
  Convenience wrapper: apply the evaluate operator to all composite
  channels of the image.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict evaluate_pixels,
zero;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
number_images=GetImageListLength(images);
GetMagickPixelPacket(images,&zero);
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
IndexPacket
*magick_restrict evaluate_indexes;
MagickPixelPacket
*evaluate_pixel;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) number_images; i++)
evaluate_pixel[i]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
const IndexPacket
*indexes;
const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),op,evaluate_pixel[i].red);
evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),op,evaluate_pixel[i].green);
evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),op,evaluate_pixel[i].blue);
evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
*indexes,op,evaluate_pixel[i].index);
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+i,ClampToQuantum(
evaluate_pixel[i/2].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
IndexPacket
*magick_restrict evaluate_indexes;
ssize_t
i,
x;
MagickPixelPacket
*evaluate_pixel;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
evaluate_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
const IndexPacket
*indexes;
const PixelPacket
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].red);
evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].green);
evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].blue);
evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].opacity);
if (image->colorspace == CMYKColorspace)
evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
evaluate_pixel[x].index);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (op == MeanEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red/=number_images;
evaluate_pixel[x].green/=number_images;
evaluate_pixel[x].blue/=number_images;
evaluate_pixel[x].opacity/=number_images;
evaluate_pixel[x].index/=number_images;
}
if (op == RootMeanSquareEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/
number_images);
evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/
number_images);
evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/
number_images);
evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/
number_images);
evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/
number_images);
}
if (op == MultiplyEvaluateOperator)
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
{
evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
evaluate_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
  EvaluateImageChannel() applies the arithmetic, relational, or logical
  operator 'op' with the constant 'value' to every selected channel of every
  pixel in 'image'.  The image is modified in place.

    image:     the image (promoted to DirectClass since pixels are rewritten).
    channel:   bit mask of channels to operate on.
    op:        the evaluate operator to apply.
    value:     the operator's constant argument.
    exception: receives error details.

  Returns MagickTrue on success, MagickFalse if any row could not be
  processed.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Pixels are rewritten in place, so the image must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  /*
    One RandomInfo per worker thread: random-seeded operators need private
    generator state.
  */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    /* Another row already failed; skip the remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        result;

      if ((channel & RedChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
          /* MeanEvaluateOperator sums pixel and value; halve for the mean. */
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelRed(q,ClampToQuantum(result));
        }
      if ((channel & GreenChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelGreen(q,ClampToQuantum(result));
        }
      if ((channel & BlueChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelBlue(q,ClampToQuantum(result));
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Without a matte channel, operate on the raw opacity value;
            otherwise operate in alpha space and store through SetPixelAlpha.
          */
          if (image->matte == MagickFalse)
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelOpacity(q,ClampToQuantum(result));
            }
          else
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelAlpha(q,ClampToQuantum(result));
            }
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
            op,value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelIndex(indexes+x,ClampToQuantum(result));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress++ is not atomic here; the monitor count is
           best-effort under OpenMP. */
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken an image, to
% increase or decrease contrast in an image, or to produce the "negative" of
% an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the selected channel function on a single pixel
  value and returns the clamped result.  Missing parameters fall back to
  per-function defaults; 'exception' is currently unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    result;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: Horner evaluation of the coefficients, given from highest
        to lowest order (e.g. c0*x^3 + c1*x^2 + c2*x + c3), with x the pixel
        value scaled into [0,1].
      */
      ssize_t
        n;

      result=0.0;
      for (n=0; n < (ssize_t) number_parameters; n++)
        result=result*QuantumScale*pixel + parameters[n];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid: parameters are frequency, phase (degrees), amplitude, bias.
      */
      double
        amplitude,
        bias,
        frequency,
        phase;

      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(MagickRealType) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel + phase/360.0) )) + bias ) );
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin: parameters are width, center, range, bias.  Inputs outside
        the domain of asin() are pegged at the range limits.
      */
      double
        bias,
        center,
        range,
        width;

      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0*PerceptibleReciprocal(width)*(QuantumScale*pixel-center);
      if (result <= -1.0)
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan: parameters are slope, center, range, bias.
      */
      double
        bias,
        center,
        range,
        slope;

      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        result) + bias ) );
      break;
    }
    case UndefinedFunction:
    default:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  FunctionImage() is a convenience wrapper that applies the channel function
  to all composite channels of the image.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
/*
  FunctionImageChannel() applies the channel function 'function' with the
  given parameters to every selected channel of every pixel in 'image'.

    image:             the image (promoted to DirectClass; modified in place).
    channel:           bit mask of channels to operate on.
    function:          the channel function to apply.
    number_parameters: count of entries in 'parameters'.
    parameters:        the function's parameters.
    exception:         receives error details.

  Returns MagickTrue on success, MagickFalse if any row could not be
  processed.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Pixels are rewritten in place, so the image must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL accelerated path first; fall through on failure. */
  status=AccelerateFunctionImage(image,channel,function,number_parameters,
    parameters,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    /* Another row already failed; skip the remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /* Matte images are processed in alpha space, others in opacity. */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress++ is not atomic here; the monitor count is
           best-effort under OpenMP. */
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageChannelEntropy method is:
%
% MagickBooleanType GetImageChannelEntropy(const Image *image,
% const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageEntropy() is a convenience wrapper returning the entropy over all
  composite channels of the image.
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  return(GetImageChannelEntropy(image,CompositeChannels,entropy,exception));
}
/*
  GetImageChannelEntropy() returns in *entropy the average entropy of the
  channels selected by 'channel'.  The opacity channel contributes only when
  the image has a matte channel; the index channel only for CMYK images.

  Fix: guard the final division so a channel mask that selects no channels
  (e.g. OpacityChannel on a non-matte image) no longer divides by zero; the
  reported entropy is then 0.0.

  Returns MagickFalse only if the channel statistics could not be computed.
*/
MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
  const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].entropy=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[RedChannel].entropy;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[GreenChannel].entropy;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlueChannel].entropy;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[OpacityChannel].entropy;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlackChannel].entropy;
      channels++;
    }
  /*
    Average over the contributing channels; skip the division when the mask
    selected none to avoid a divide-by-zero (NaN result).
  */
  if (channels != 0)
    channel_statistics[CompositeChannels].entropy/=channels;
  *entropy=channel_statistics[CompositeChannels].entropy;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() is a convenience wrapper returning the extrema over all
  composite channels of the image.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception));
}
/*
  GetImageChannelExtrema() returns in *minima and *maxima the integral
  extrema of the selected channels, rounded from the continuous channel
  range to the nearest integers.
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    lower,
    upper;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&lower,&upper,exception);
  /* Round half away from the interval: ceil(min-0.5), floor(max+0.5). */
  *minima=(size_t) ceil(lower-0.5);
  *maxima=(size_t) floor(upper+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() is a convenience wrapper returning the kurtosis and
  skewness over all composite channels of the image.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
/*
  GetImageChannelKurtosis() returns in *kurtosis and *skewness the excess
  kurtosis and skewness of the selected channels, computed from the raw
  moments accumulated over every pixel.

  Fixes:
    - the OpacityChannel accumulators mixed GetPixelOpacity() and
      GetPixelAlpha() in the 2nd and 3rd moments while the 1st and 4th used
      alpha; all four moments now consistently use the alpha value.
    - the variance (sum_squares - mean^2) is clamped at zero before the
      square root so floating-point rounding can no longer produce a NaN
      standard deviation.

  Returns MagickFalse if any row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power,
    variance;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Use the alpha value consistently in every moment (previously the
            2nd and 3rd moments mixed in GetPixelOpacity()).
          */
          mean+=GetPixelAlpha(p);
          sum_squares+=(double) GetPixelAlpha(p)*GetPixelAlpha(p);
          sum_cubes+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p);
          sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p)*GetPixelAlpha(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          double
            index;

          index=(double) GetPixelIndex(indexes+x);
          mean+=index;
          sum_squares+=index*index;
          sum_cubes+=index*index*index;
          sum_fourth_power+=index*index*index*index;
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  /*
    Clamp a tiny negative variance caused by floating-point rounding so the
    square root cannot produce NaN.
  */
  variance=sum_squares-(mean*mean);
  standard_deviation=sqrt(variance < 0.0 ? 0.0 : variance);
  if (standard_deviation != 0.0)
    {
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() is a convenience wrapper returning the mean and standard
  deviation over all composite channels of the image.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
/*
  GetImageChannelMean() returns in *mean and *standard_deviation the average
  mean and standard deviation of the channels selected by 'channel'.  The
  opacity channel contributes as alpha (QuantumRange-opacity) and only when
  the image has a matte channel; the index channel only for CMYK images.

  Fixes:
    - the IndexChannel branch added the composite standard deviation to
      itself instead of adding the BlackChannel's standard deviation.
    - the final averages are guarded so a channel mask that selects no
      channels no longer divides by zero; both results are then 0.0.

  Returns MagickFalse only if the channel statistics could not be computed.
*/
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].standard_deviation;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].standard_deviation;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].standard_deviation;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      /* Opacity statistics are stored as opacity; convert the mean to alpha. */
      channel_statistics[CompositeChannels].mean+=
        (QuantumRange-channel_statistics[OpacityChannel].mean);
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].standard_deviation;
      channels++;
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      /*
        Bug fix: add the BlackChannel standard deviation (previously the
        composite value was mistakenly added to itself, doubling it).
      */
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].standard_deviation;
      channels++;
    }
  /* Guard against a zero divisor when the mask selected no channels. */
  if (channels != 0)
    {
      channel_statistics[CompositeChannels].mean/=channels;
      channel_statistics[CompositeChannels].standard_deviation/=channels;
    }
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
ChannelMoments
*channel_moments;
double
M00[CompositeChannels+1],
M01[CompositeChannels+1],
M02[CompositeChannels+1],
M03[CompositeChannels+1],
M10[CompositeChannels+1],
M11[CompositeChannels+1],
M12[CompositeChannels+1],
M20[CompositeChannels+1],
M21[CompositeChannels+1],
M22[CompositeChannels+1],
M30[CompositeChannels+1];
MagickPixelPacket
pixel;
PointInfo
centroid[CompositeChannels+1];
ssize_t
channel,
channels,
y;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,length*sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
GetMagickPixelPacket(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
M00[RedChannel]+=QuantumScale*pixel.red;
M10[RedChannel]+=x*QuantumScale*pixel.red;
M01[RedChannel]+=y*QuantumScale*pixel.red;
M00[GreenChannel]+=QuantumScale*pixel.green;
M10[GreenChannel]+=x*QuantumScale*pixel.green;
M01[GreenChannel]+=y*QuantumScale*pixel.green;
M00[BlueChannel]+=QuantumScale*pixel.blue;
M10[BlueChannel]+=x*QuantumScale*pixel.blue;
M01[BlueChannel]+=y*QuantumScale*pixel.blue;
if (image->matte != MagickFalse)
{
M00[OpacityChannel]+=QuantumScale*pixel.opacity;
M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
}
if (image->colorspace == CMYKColorspace)
{
M00[IndexChannel]+=QuantumScale*pixel.index;
M10[IndexChannel]+=x*QuantumScale*pixel.index;
M01[IndexChannel]+=y*QuantumScale*pixel.index;
}
p++;
}
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
/*
Compute the image moments.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*QuantumScale*pixel.red;
M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*QuantumScale*pixel.red;
M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
pixel.red;
M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
pixel.red;
M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*QuantumScale*pixel.green;
M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*QuantumScale*pixel.green;
M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
pixel.green;
M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
pixel.green;
M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*QuantumScale*pixel.blue;
M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*QuantumScale*pixel.blue;
M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
pixel.blue;
M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
pixel.blue;
if (image->matte != MagickFalse)
{
M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
QuantumScale*pixel.opacity;
M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
QuantumScale*pixel.opacity;
}
if (image->colorspace == CMYKColorspace)
{
M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*QuantumScale*pixel.index;
M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*QuantumScale*pixel.index;
M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
QuantumScale*pixel.index;
M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
QuantumScale*pixel.index;
}
p++;
}
}
channels=3;
M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
if (image->matte != MagickFalse)
{
channels+=1;
M00[CompositeChannels]+=M00[OpacityChannel];
M01[CompositeChannels]+=M01[OpacityChannel];
M02[CompositeChannels]+=M02[OpacityChannel];
M03[CompositeChannels]+=M03[OpacityChannel];
M10[CompositeChannels]+=M10[OpacityChannel];
M11[CompositeChannels]+=M11[OpacityChannel];
M12[CompositeChannels]+=M12[OpacityChannel];
M20[CompositeChannels]+=M20[OpacityChannel];
M21[CompositeChannels]+=M21[OpacityChannel];
M22[CompositeChannels]+=M22[OpacityChannel];
M30[CompositeChannels]+=M30[OpacityChannel];
}
if (image->colorspace == CMYKColorspace)
{
channels+=1;
M00[CompositeChannels]+=M00[IndexChannel];
M01[CompositeChannels]+=M01[IndexChannel];
M02[CompositeChannels]+=M02[IndexChannel];
M03[CompositeChannels]+=M03[IndexChannel];
M10[CompositeChannels]+=M10[IndexChannel];
M11[CompositeChannels]+=M11[IndexChannel];
M12[CompositeChannels]+=M12[IndexChannel];
M20[CompositeChannels]+=M20[IndexChannel];
M21[CompositeChannels]+=M21[IndexChannel];
M22[CompositeChannels]+=M22[IndexChannel];
M30[CompositeChannels]+=M30[IndexChannel];
}
M00[CompositeChannels]/=(double) channels;
M01[CompositeChannels]/=(double) channels;
M02[CompositeChannels]/=(double) channels;
M03[CompositeChannels]/=(double) channels;
M10[CompositeChannels]/=(double) channels;
M11[CompositeChannels]/=(double) channels;
M12[CompositeChannels]/=(double) channels;
M20[CompositeChannels]/=(double) channels;
M21[CompositeChannels]/=(double) channels;
M22[CompositeChannels]/=(double) channels;
M30[CompositeChannels]/=(double) channels;
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0*
PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0*
PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
if (fabs(M11[channel]) < 0.0)
{
if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
((M20[channel]-M02[channel]) < 0.0))
channel_moments[channel].ellipse_angle+=90.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) >= 0.0)
{
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
}
else
if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
((M20[channel]-M02[channel]) < 0.0))
channel_moments[channel].ellipse_angle+=90.0;
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y*
channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.x)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
for (channel=0; channel <= CompositeChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].I[0]=M20[channel]+M02[channel];
channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
  const Image *image,ExceptionInfo *exception)
{
  ChannelMoments
    *moments;

  ChannelPerceptualHash
    *perceptual_hash;

  Image
    *hash_image;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    channel;

  /*
    Pass 1: blur, reduce depth, transform to sRGB, and take -log10 of the
    Hu invariant moments for each channel (stored in P[]).
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    return((ChannelPerceptualHash *) NULL);
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,sRGBColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);  /* fix: hash_image was leaked */
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    return((ChannelPerceptualHash *) NULL);
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    CompositeChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    {
      moments=(ChannelMoments *) RelinquishMagickMemory(moments);  /* fix:
        moments was leaked on allocation failure */
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  /*
    Pass 2: same pipeline in the HCLp colorspace (stored in Q[]).
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,HCLpColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);  /* fix: hash_image was leaked */
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: report the range over all channels combined.
  */
  MagickBooleanType
    status;

  status=GetImageChannelRange(image,CompositeChannels,minima,maxima,exception);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  ssize_t
    row;

  /*
    Scan every pixel and widen [*minima,*maxima] for each selected channel.
    Returns MagickTrue only if every row was successfully read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *minima=MagickMaximumValue;
  *maxima=(-MagickMaximumValue);
  GetMagickPixelPacket(image,&pixel);
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict q;

    ssize_t
      column;

    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      SetMagickPixelPacket(image,q,indexes+column,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if (pixel.red < *minima)
            *minima=(double) pixel.red;
          if (pixel.red > *maxima)
            *maxima=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (pixel.green < *minima)
            *minima=(double) pixel.green;
          if (pixel.green > *maxima)
            *maxima=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (pixel.blue < *minima)
            *minima=(double) pixel.blue;
          if (pixel.blue > *maxima)
            *maxima=(double) pixel.blue;
        }
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        {
          /*
            Opacity is stored inverted; report it as alpha coverage.
          */
          if ((QuantumRange-pixel.opacity) < *minima)
            *minima=(double) (QuantumRange-pixel.opacity);
          if ((QuantumRange-pixel.opacity) > *maxima)
            *maxima=(double) (QuantumRange-pixel.opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) pixel.index < *minima)
            *minima=(double) pixel.index;
          if ((double) pixel.index > *maxima)
            *maxima=(double) pixel.index;
        }
      q++;
    }
  }
  return(row == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    standard_deviation;

  MagickPixelPacket
    number_bins,       /* per-channel count of occupied histogram bins */
    *histogram;        /* MaxMap+1 bins of per-channel pixel counts */

  QuantumAny
    range;

  ssize_t
    i;

  size_t
    channels,
    depth,
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    One statistics slot per channel plus a composite (all-channel) slot.
  */
  length=CompositeChannels+1UL;
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
    sizeof(*channel_statistics));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U,
    sizeof(*histogram));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL))
    {
      /* Either allocation failed: release whichever succeeded, return NULL. */
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,length*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    /* Seed min/max so any real pixel value replaces them. */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1U)*sizeof(*histogram));
  (void) memset(&number_bins,0,sizeof(number_bins));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; )
    {
      /*
        NOTE: x and p advance only at the BOTTOM of this loop.  When a
        channel's candidate bit depth cannot represent the current pixel
        exactly, the depth is bumped and `continue' re-tests the SAME pixel
        against the wider range.
      */
      if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[RedChannel].depth;
          range=GetQuantumRange(depth);
          if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
            {
              channel_statistics[RedChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[GreenChannel].depth;
          range=GetQuantumRange(depth);
          if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
            {
              channel_statistics[GreenChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[BlueChannel].depth;
          range=GetQuantumRange(depth);
          if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            {
              channel_statistics[BlueChannel].depth++;
              continue;
            }
        }
      if (image->matte != MagickFalse)
        {
          if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[OpacityChannel].depth;
              range=GetQuantumRange(depth);
              if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse)
                {
                  channel_statistics[OpacityChannel].depth++;
                  continue;
                }
            }
        }
      if (image->colorspace == CMYKColorspace)
        {
          if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[BlackChannel].depth;
              range=GetQuantumRange(depth);
              if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
                {
                  channel_statistics[BlackChannel].depth++;
                  continue;
                }
            }
        }
      /*
        Accumulate raw moments (sum .. sum^4) and min/max per channel.
      */
      if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
        channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
      if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
        channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
      channel_statistics[RedChannel].sum+=GetPixelRed(p);
      channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
        GetPixelRed(p);
      channel_statistics[RedChannel].sum_cubed+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      channel_statistics[RedChannel].sum_fourth_power+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
        channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
      if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
        channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
      channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
        GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
        GetPixelGreen(p)*GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_fourth_power+=(double)
        GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
      if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
        channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
      if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
        channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
      channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
        GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
        GetPixelBlue(p)*GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_fourth_power+=(double)
        GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
      /* Bin each sample for the entropy computation below. */
      histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
      histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
      histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
      if (image->matte != MagickFalse)
        {
          if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima)
            channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p);
          if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima)
            channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum_squared+=(double)
            GetPixelAlpha(p)*GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum_cubed+=(double)
            GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum_fourth_power+=(double)
            GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
          histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++;
        }
      if (image->colorspace == CMYKColorspace)
        {
          if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
            channel_statistics[BlackChannel].minima=(double)
              GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
            channel_statistics[BlackChannel].maxima=(double)
              GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_squared+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_cubed+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_fourth_power+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        }
      x++;
      p++;
    }
  }
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    /*
      NOTE(review): these locals deliberately shadow the outer `area' and
      `standard_deviation'; the outer ones are reused after this loop.
    */
    double
      area,
      mean,
      standard_deviation;

    /*
      Normalize pixel statistics: convert raw sums to per-pixel moments,
      then apply Bessel's correction (n/(n-1)) to the standard deviation.
    */
    area=PerceptibleReciprocal((double) image->columns*image->rows);
    mean=channel_statistics[i].sum*area;
    channel_statistics[i].sum=mean;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=mean;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean));
    area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
      ((double) image->columns*image->rows);
    standard_deviation=sqrt(area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  /*
    Count occupied histogram bins per channel (needed to normalize entropy).
  */
  for (i=0; i < (ssize_t) (MaxMap+1U); i++)
  {
    if (histogram[i].red > 0.0)
      number_bins.red++;
    if (histogram[i].green > 0.0)
      number_bins.green++;
    if (histogram[i].blue > 0.0)
      number_bins.blue++;
    if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0))
      number_bins.opacity++;
    if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0))
      number_bins.index++;
  }
  area=PerceptibleReciprocal((double) image->columns*image->rows);
  for (i=0; i < (ssize_t) (MaxMap+1U); i++)
  {
    /*
      Compute pixel entropy: Shannon entropy of the normalized histogram,
      scaled by log10(number of occupied bins) so a flat histogram gives 1.
    */
    histogram[i].red*=area;
    channel_statistics[RedChannel].entropy+=-histogram[i].red*
      MagickLog10(histogram[i].red)*
      PerceptibleReciprocal(MagickLog10((double) number_bins.red));
    histogram[i].green*=area;
    channel_statistics[GreenChannel].entropy+=-histogram[i].green*
      MagickLog10(histogram[i].green)*
      PerceptibleReciprocal(MagickLog10((double) number_bins.green));
    histogram[i].blue*=area;
    channel_statistics[BlueChannel].entropy+=-histogram[i].blue*
      MagickLog10(histogram[i].blue)*
      PerceptibleReciprocal(MagickLog10((double) number_bins.blue));
    if (image->matte != MagickFalse)
      {
        histogram[i].opacity*=area;
        channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity*
          MagickLog10(histogram[i].opacity)*
          PerceptibleReciprocal(MagickLog10((double) number_bins.opacity));
      }
    if (image->colorspace == CMYKColorspace)
      {
        histogram[i].index*=area;
        channel_statistics[IndexChannel].entropy+=-histogram[i].index*
          MagickLog10(histogram[i].index)*
          PerceptibleReciprocal(MagickLog10((double) number_bins.index));
      }
  }
  /*
    Compute overall statistics.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
      channel_statistics[CompositeChannels].depth,(double)
      channel_statistics[i].depth);
    channel_statistics[CompositeChannels].minima=MagickMin(
      channel_statistics[CompositeChannels].minima,
      channel_statistics[i].minima);
    channel_statistics[CompositeChannels].maxima=EvaluateMax(
      channel_statistics[CompositeChannels].maxima,
      channel_statistics[i].maxima);
    channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
    channel_statistics[CompositeChannels].sum_squared+=
      channel_statistics[i].sum_squared;
    channel_statistics[CompositeChannels].sum_cubed+=
      channel_statistics[i].sum_cubed;
    channel_statistics[CompositeChannels].sum_fourth_power+=
      channel_statistics[i].sum_fourth_power;
    channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
    channel_statistics[CompositeChannels].variance+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
    /*
      NOTE(review): this assignment is overwritten on every iteration (and
      again below after the loop), so only the final recomputation matters.
    */
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
      ((double) image->columns*image->rows);
    standard_deviation=sqrt(area*standard_deviation*standard_deviation);
    channel_statistics[CompositeChannels].standard_deviation=standard_deviation;
    channel_statistics[CompositeChannels].entropy+=
      channel_statistics[i].entropy;
  }
  /*
    Average the composite slot over the number of active channels
    (RGB, plus alpha if matte, plus black if CMYK).
  */
  channels=3;
  if (image->matte != MagickFalse)
    channels++;
  if (image->colorspace == CMYKColorspace)
    channels++;
  channel_statistics[CompositeChannels].sum/=channels;
  channel_statistics[CompositeChannels].sum_squared/=channels;
  channel_statistics[CompositeChannels].sum_cubed/=channels;
  channel_statistics[CompositeChannels].sum_fourth_power/=channels;
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].kurtosis/=channels;
  channel_statistics[CompositeChannels].skewness/=channels;
  channel_statistics[CompositeChannels].entropy/=channels;
  /*
    Recompute the composite variance/mean/standard deviation from the
    averaged sums (supersedes the per-iteration value set above).
  */
  i=CompositeChannels;
  area=PerceptibleReciprocal((double) channels*image->columns*image->rows);
  channel_statistics[i].variance=channel_statistics[i].sum_squared;
  channel_statistics[i].mean=channel_statistics[i].sum;
  standard_deviation=sqrt(channel_statistics[i].variance-
    (channel_statistics[i].mean*channel_statistics[i].mean));
  standard_deviation=sqrt(PerceptibleReciprocal((double) channels*
    image->columns*image->rows-1.0)*channels*image->columns*image->rows*
    standard_deviation*standard_deviation);
  channel_statistics[i].standard_deviation=standard_deviation;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics from the central moments
      (expanded in terms of the raw per-pixel moment sums).
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /*
    Final composite mean/std: simple average of the per-channel values.
  */
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[CompositeChannels].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositeChannels].standard_deviation+=
      channel_statistics[i].standard_deviation;
  }
  channel_statistics[CompositeChannels].mean/=(double) channels;
  channel_statistics[CompositeChannels].standard_deviation/=(double) channels;
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /* A row fetch failed: release the statistics and return NULL. */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the polynomial to the default channels.
  */
  return(PolynomialImageChannel(images,DefaultChannels,number_terms,terms,
    exception));
}
/*
  PolynomialImageChannel() returns a new image in which each selected
  channel of every pixel is sum_i coefficient_i*(value_i^degree_i) over the
  images of the sequence: image i contributes with coefficient terms[2*i]
  and exponent terms[2*i+1], applied to its QuantumScale-normalized channel
  value (opacity is accumulated as alpha, QuantumRange-opacity).  Returns
  the new image, or NULL on failure (error reported through 'exception').
  NOTE(review): the trailing constant documented in the header comment
  (list length 2 x number_terms + 1) is never read in this routine --
  confirm that is intended.
*/
MagickExport Image *PolynomialImageChannel(const Image *images,
const ChannelType channel,const size_t number_terms,const double *terms,
ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**magick_restrict polynomial_pixels,
zero;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
/* Result pixels are written directly, so the image must be DirectClass. */
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
/* One scratch row of MagickPixelPackets per worker thread. */
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
IndexPacket
*magick_restrict polynomial_indexes;
MagickPixelPacket
*polynomial_pixel;
PixelPacket
*magick_restrict q;
ssize_t
i,
x;
size_t
number_images;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
/* Zero this thread's accumulator row before summing the sequence. */
polynomial_pixel=polynomial_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
polynomial_pixel[x]=zero;
next=images;
number_images=GetImageListLength(images);
/* One (coefficient,degree) pair per image; images beyond the term list
are ignored. */
for (i=0; i < (ssize_t) number_images; i++)
{
const IndexPacket
*indexes;
const PixelPacket
*p;
if (i >= (ssize_t) number_terms)
break;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
coefficient,
degree;
coefficient=terms[i << 1];
degree=terms[(i << 1)+1];
if ((channel & RedChannel) != 0)
polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
if ((channel & GreenChannel) != 0)
polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
degree);
if ((channel & BlueChannel) != 0)
polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
degree);
/* Opacity is accumulated in alpha form (QuantumRange-opacity). */
if ((channel & OpacityChannel) != 0)
polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
(QuantumRange-p->opacity),degree);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
degree);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
/* Scale accumulated sums back to quantum range, clamp, and store. */
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
polynomial_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ is not atomic; progress counts are approximate under OpenMP. */
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5
/*
  Skip-list node: next[] holds the successor color index at each of up to
  nine levels; count is how many neighborhood samples share this 16-bit
  color value; signature marks the node live for the current neighborhood
  (stale nodes are invalidated by bumping the list signature, see
  ResetPixelList/AddNodePixelList).
*/
typedef struct _ListNode
{
size_t
next[9],
count,
signature;
} ListNode;
/*
  One skip-list per channel: level is the highest link level currently in
  use; nodes is an array of 65537 entries -- one per 16-bit sample value
  plus a sentinel/root node at index 65536.
*/
typedef struct _SkipList
{
ssize_t
level;
ListNode
*nodes;
} SkipList;
/*
  Per-thread histogram of a pixel neighborhood: length is the neighborhood
  size (width*height), seed drives the pseudo-random level generator,
  signature validates nodes, and lists holds one skip-list per channel
  (0=red, 1=green, 2=blue, 3=opacity, 4=colormap/black index -- see
  InsertPixelList).
*/
typedef struct _PixelList
{
size_t
length,
seed,
signature;
SkipList
lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  ssize_t
    channel;

  /*
    Release each channel's skip-list node array, then the list itself.
    Accepts NULL and returns NULL in that case.
  */
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
  {
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      continue;
    pixel_list->lists[channel].nodes=(ListNode *) RelinquishAlignedMemory(
      pixel_list->lists[channel].nodes);
  }
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    n;

  /*
    Tear down one PixelList per worker thread, then release the pointer
    table itself.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
ssize_t
i;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
for (i=0; i < ListChannels; i++)
{
pixel_list->lists[i].nodes=(ListNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->lists[i].nodes));
if (pixel_list->lists[i].nodes == (ListNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->lists[i].nodes,0,65537UL*
sizeof(*pixel_list->lists[i].nodes));
}
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
/*
  AddNodePixelList() links a new color node into the skip-list of the
  given channel.  'color' is both the node's index in the 65537-entry node
  array and its sort key, so storage and ordering coincide; index 65536 is
  the sentinel/root node.
*/
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
const size_t color)
{
SkipList
*list;
ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
list=pixel_list->lists+channel;
/* Stamp with the current signature so ResetPixelList can invalidate it
   wholesale later; a fresh node starts with one occurrence. */
list->nodes[color].signature=pixel_list->signature;
list->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
/* Record the predecessor of 'color' at every active level in update[]. */
search=65536UL;
for (level=list->level; level >= 0; level--)
{
while (list->nodes[search].next[level] < color)
search=list->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
/* Linear-congruential step of the seed; the level keeps growing while
   bits 8-9 are both set (roughly a 1-in-4 chance per iteration). */
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
/* Clamp: at most 9 levels (size of next[]) and at most two above the
   list's current level. */
if (level > 8)
level=8;
if (level > (list->level+2))
level=list->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > list->level)
{
list->level++;
update[list->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
list->nodes[color].next[level]=list->nodes[update[level]].next[level];
list->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  /*
    Walk level 0 of each channel's skip-list and keep the largest 16-bit
    sample seen; stop once every neighborhood pixel has been counted.
  */
  for (channel=0; channel < 5; channel++)
  {
    SkipList
      *list;

    size_t
      largest,
      node;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    node=65536UL;
    tally=0;
    largest=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node > largest)
        largest=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) largest;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  /*
    Average the 16-bit samples of each channel by walking level 0 of its
    skip-list; every node contributes count*color to the running total.
  */
  for (channel=0; channel < 5; channel++)
  {
    MagickRealType
      total;

    SkipList
      *list;

    size_t
      node;

    ssize_t
      seen;

    list=pixel_list->lists+channel;
    node=65536UL;
    seen=0;
    total=0.0;
    do
    {
      node=list->nodes[node].next[0];
      total+=(MagickRealType) list->nodes[node].count*node;
      seen+=list->nodes[node].count;
    } while (seen < (ssize_t) pixel_list->length);
    total/=pixel_list->length;
    channels[channel]=(unsigned short) total;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMedianPixelList() sets the five channel fields of 'pixel' to the
  median 16-bit sample of the neighborhood: level 0 of each channel's
  skip-list is walked in ascending color order until more than half of the
  samples have been passed.
*/
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
SkipList
*list;
ssize_t
channel;
size_t
color;
ssize_t
count;
unsigned short
channels[ListChannels];
/*
Find the median value for each of the color.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
/* 65536 is the sentinel; the first next[0] hop lands on the smallest
   sample value present. */
color=65536L;
count=0;
do
{
color=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
channels[channel]=(unsigned short) color;
}
/* NOTE(review): unlike the sibling Get*PixelList helpers, this one
   reinitializes *pixel via GetMagickPixelPacket with a NULL image before
   storing the results -- confirm the asymmetry is intentional. */
GetMagickPixelPacket((const Image *) NULL,pixel);
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  /*
    Walk level 0 of each channel's skip-list and keep the smallest 16-bit
    sample seen; stop once every neighborhood pixel has been counted.
  */
  for (channel=0; channel < 5; channel++)
  {
    SkipList
      *list;

    size_t
      node,
      smallest;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    tally=0;
    node=65536UL;
    smallest=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node < smallest)
        smallest=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) smallest;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetModePixelList() sets each channel field of 'pixel' to the most
  frequent 16-bit sample ("predominant color") of the neighborhood, found
  by a level-0 walk of the channel's skip-list.
*/
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
SkipList
*list;
ssize_t
channel;
size_t
color,
max_count,
mode;
ssize_t
count;
unsigned short
channels[5];
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
/* Start from the sentinel (65536); its count seeds max_count. */
color=65536L;
mode=color;
max_count=list->nodes[mode].count;
count=0;
do
{
color=list->nodes[color].next[0];
/* Strict '>' keeps the smallest value on ties (first seen wins). */
if (list->nodes[color].count > max_count)
{
mode=color;
max_count=list->nodes[mode].count;
}
count+=list->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
channels[channel]=(unsigned short) mode;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetNonpeakPixelList() sets each channel field of 'pixel' to the median
  sample unless the median sits at an end of the value list: it walks the
  skip-list to the median while remembering the previous and next nodes,
  and if exactly one neighbor is the sentinel (65536) it steps one node
  inward instead -- avoiding the extreme ("peak") value.
*/
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
SkipList
*list;
ssize_t
channel;
size_t
color,
next,
previous;
ssize_t
count;
unsigned short
channels[5];
/*
Finds the non peak value for each of the colors.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
next=list->nodes[color].next[0];
count=0;
do
{
/* previous/color/next track a 3-node window around the walk. */
previous=color;
color=next;
next=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
/* Median at the low end: take the successor; at the high end: take the
predecessor; otherwise keep the median itself. */
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
channels[channel]=(unsigned short) color;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum;

  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the root mean square value for each color channel: walk level 0
    of the channel's skip-list, where each node contributes 'count'
    occurrences of its 16-bit sample value.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    do
    {
      color=list->nodes[color].next[0];
      /*
        Accumulate in floating point: the previous integer expression
        count*color*color was evaluated in size_t and can overflow on
        32-bit platforms (color*color alone reaches ~2^32 for 16-bit
        samples).  The products are exact in double precision.
      */
      sum+=(MagickRealType) list->nodes[color].count*
        ((MagickRealType) color*color);
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum);
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;

  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard deviation for each color channel as
    sqrt(E[x^2]-E[x]^2) over the 16-bit samples of the neighborhood,
    walking level 0 of the channel's skip-list.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      MagickRealType
        occurrences;

      color=list->nodes[color].next[0];
      occurrences=(MagickRealType) list->nodes[color].count;
      sum+=occurrences*color;
      /*
        Multiply instead of the previous O(count) loop that added
        color*color once per occurrence; both color*color and the product
        with the count are exact in double precision, so the result is
        unchanged while the per-node cost drops to O(1).
      */
      sum_squared+=occurrences*((MagickRealType) color*color);
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  InsertPixelList() adds one pixel's samples to the per-channel skip-lists
  of 'pixel_list': lists 0-3 receive red, green, blue and opacity; list 4
  receives the colormap/black-channel index for CMYK images.  If the node
  for a sample value already carries the current signature it is live for
  this neighborhood and only its count is bumped; otherwise the node is
  (re)linked via AddNodePixelList.
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
const IndexPacket *indexes,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(GetPixelRed(pixel));
signature=pixel_list->lists[0].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[0].nodes[index].count++;
else
AddNodePixelList(pixel_list,0,index);
index=ScaleQuantumToShort(GetPixelGreen(pixel));
signature=pixel_list->lists[1].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[1].nodes[index].count++;
else
AddNodePixelList(pixel_list,1,index);
index=ScaleQuantumToShort(GetPixelBlue(pixel));
signature=pixel_list->lists[2].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[2].nodes[index].count++;
else
AddNodePixelList(pixel_list,2,index);
index=ScaleQuantumToShort(GetPixelOpacity(pixel));
signature=pixel_list->lists[3].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[3].nodes[index].count++;
else
AddNodePixelList(pixel_list,3,index);
/* NOTE(review): for non-CMYK images 'index' still holds the opacity
   sample here, so list 4 duplicates the opacity data.  Harmless if
   channel-4 results are only consumed for CMYK images -- confirm. */
if (image->colorspace == CMYKColorspace)
index=ScaleQuantumToShort(GetPixelIndex(indexes));
signature=pixel_list->lists[4].nodes[index].signature;
if (signature == pixel_list->signature)
pixel_list->lists[4].nodes[index].count++;
else
AddNodePixelList(pixel_list,4,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
  ssize_t
    channel;

  /*
    Re-arm every channel's skip-list for a new neighborhood: point each
    level of the sentinel node (index 65536) back at itself and drop the
    list level to zero.  Bumping the signature invalidates all nodes from
    the previous neighborhood without touching them; the old signature
    reseeds the level generator.
  */
  for (channel=0; channel < 5; channel++)
  {
    int
      level;

    ListNode
      *sentinel;

    SkipList
      *list;

    list=pixel_list->lists+channel;
    sentinel=list->nodes+65536UL;
    list->level=0;
    for (level=0; level < 9; level++)
      sentinel->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compute the statistic over all default channels.
  */
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
/*
  StatisticImageChannel() clones 'image' and replaces each pixel of the
  clone with the chosen statistic (min/max/mean/median/mode/...) of the
  width x height neighborhood centered on it, for the selected channels
  only.  Returns the new image, or NULL on failure (error reported
  through 'exception').
*/
MagickExport Image *StatisticImageChannel(const Image *image,
const ChannelType channel,const StatisticType type,const size_t width,
const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
size_t
neighbor_height,
neighbor_width;
ssize_t
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
/* Pixels are written directly, so the clone must be DirectClass. */
if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
{
InheritException(exception,&statistic_image->exception);
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
/* A zero dimension selects an automatic kernel size. */
neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
width;
neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
height;
/* One skip-list histogram per worker thread. */
pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict statistic_indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* Read a window of neighbor_height rows, each image->columns+
neighbor_width pixels wide, centered on row y; the virtual cache view
supplies out-of-bounds pixels. */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
(ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
neighbor_height,exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
MagickPixelPacket
pixel;
const IndexPacket
*magick_restrict s;
const PixelPacket
*magick_restrict r;
ssize_t
u,
v;
r=p;
s=indexes+x;
/* Rebuild this thread's histogram from the neighborhood around x. */
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) neighbor_height; v++)
{
for (u=0; u < (ssize_t) neighbor_width; u++)
InsertPixelList(image,r+u,s+u,pixel_list[id]);
/* Advance by one window row (row stride of the virtual window). */
r+=image->columns+neighbor_width;
s+=image->columns+neighbor_width;
}
/* Seed 'pixel' from a window pixel before the statistic overwrites the
channel fields below.
NOTE(review): the seed offset neighbor_width*neighbor_height/2 does
not use the window's row stride -- confirm it addresses the intended
center pixel; all switch cases overwrite the channel fields anyway. */
GetMagickPixelPacket(image,&pixel);
SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
neighbor_width*neighbor_height/2,&pixel);
switch (type)
{
case GradientStatistic:
{
MagickPixelPacket
maximum,
minimum;
/* Gradient = per-channel |max-min| of the neighborhood. */
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=pixel;
pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
if (image->colorspace == CMYKColorspace)
pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
/* Store only the channels the caller selected. */
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ is not atomic; progress counts are approximate under OpenMP. */
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
pi-v14.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
int main(int argc, char *argv[]) {
  double x, sum=0.0, pi=0.0;
#if _DEBUG_
  double start,end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
  if (argc < 2) {
    /* fputs: never pass a non-literal string as a printf format. */
    fputs(Usage, stderr);
    exit(1);
  }
  /*
   * strtol instead of atoi so garbage and out-of-range input is rejected.
   * num_steps must be at least 4: the OpenMP loops below use
   * schedule(static, num_steps/4), and a chunk size of 0 is invalid.
   * (The original atoi accepted 0, which also made step = 1.0/0.)
   */
  char *end_ptr = NULL;
  errno = 0;
  long steps = strtol(argv[1], &end_ptr, 10);
  if (end_ptr == argv[1] || *end_ptr != '\0' || errno == ERANGE ||
      steps < 4 || steps > INT_MAX) {
    fprintf(stderr, "pi: <num_steps> must be an integer in [4, %d]\n",
            INT_MAX);
    exit(1);
  }
  int num_steps = (int) steps;
  double step = 1.0/(double) num_steps;
#if _DEBUG_
  start = omp_get_wtime();
#else
  Extrae_event (PROGRAM, PI_COMPUTATION);
#endif
  /* do computation: x is private per thread; sum is combined by the
     reduction clause */
#pragma omp parallel private(i,x) reduction(+:sum)
  {
#if _DEBUG_
    int id = omp_get_thread_num();
#endif
    /* first half of the rectangles, static chunks of num_steps/4 */
#pragma omp for schedule(static, num_steps/4) nowait
    for (i=0; i < num_steps/2; i++) {
      x = (i+0.5)*step;
      sum += 4.0/(1.0+x*x);
#if _DEBUG_
      printf("thread id:%d it:%d\n",id,i);
#endif
    }
    /* second half; nowait lets threads start it without a barrier */
#pragma omp for schedule(static, num_steps/4) nowait
    for (i=num_steps/2; i < num_steps; i++) {
      x = (i+0.5)*step;
      sum += 4.0/(1.0+x*x);
#if _DEBUG_
      printf("thread id:%d it:%d\n",id,i);
#endif
    }
  }
  pi = step * sum;
#if _DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
  Extrae_event (PROGRAM, END);
#endif
  /* print results */
  printf("Value of pi = %12.10f\n", pi);
  return EXIT_SUCCESS;
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageBoundingBox() returns the smallest rectangle of pixels that
  differ from the canvas border colors: target[0] is the top-left pixel
  (used for the left and top edges), target[1] the top-right pixel (right
  edge) and target[2] the bottom-left pixel (bottom edge).  Rows are
  scanned in parallel and each thread's partial box is merged under a
  named critical section.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickPixelPacket
target[3],
zero;
RectangleInfo
bounds;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Start from an inverted (empty) box: x/y at the far corner, width/height
at zero, so any non-border pixel grows it. */
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetMagickPixelPacket(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
/* target[0]: top-left corner pixel. */
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[0]);
/* target[1]: top-right corner pixel. */
GetMagickPixelPacket(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[1]);
/* target[2]: bottom-left corner pixel. */
GetMagickPixelPacket(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[2]);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
RectangleInfo
bounding_box;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* Snapshot the shared bounds under the same critical section used for
the merge below; the critical applies to this one statement only. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
/* Grow the box only where the pixel differs from the relevant border
color; width/height temporarily hold the rightmost/bottom-most
coordinates and are converted to extents after the loop. */
if ((x < bounding_box.x) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p++;
}
/* Merge this thread's partial box into the shared result. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) && (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
/* Convert rightmost/bottom-most coordinates into inclusive extents. */
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDepth() returns the depth of the image over all composite
  channels; it is a convenience wrapper around GetImageChannelDepth().
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth: the minimum number of bits needed to represent
    every sample of the selected channels exactly.  Each OpenMP worker
    accumulates a per-thread depth in current_depth[]; the answer is the
    maximum over all threads.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      /*
        Colormapped image without alpha: only the colormap entries need
        be examined.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /*
          Grow this thread's depth until the colormap entry round-trips
          exactly at that depth.
        */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        minimal depth of every possible quantum value once, then take one
        table lookup per sample.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const IndexPacket
          *magick_restrict indexes;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        /* status flips to MagickFalse once the maximum depth is reached */
        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            pixel;

          if ((channel & RedChannel) != 0)
            {
              pixel=GetPixelRed(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=GetPixelGreen(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=GetPixelBlue(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=GetPixelOpacity(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=GetPixelIndex(indexes+x);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          p++;
        }
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    General case (e.g. HDRI): probe each sample at increasing depths until
    a round trip through ScaleQuantumToAny()/ScaleAnyToQuantum() is exact.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickBooleanType
          atDepth;

        QuantumAny
          range;

        atDepth=MagickTrue;
        range=GetQuantumRange(current_depth[id]);
        if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
          if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
          if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
          if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
            (image->matte != MagickFalse))
          if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
            atDepth=MagickFalse;  /* bug fix: was MagickTrue, which wrongly
              accepted the current depth when the opacity sample did NOT
              round-trip, under-reporting depth for images with alpha */
        if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse))
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the nearest legal quantum depth (8, 16,
    32, or 64 bits).  Depths above 64 pass through unchanged.  When
    constrain is set the result is capped at MAGICKCORE_QUANTUM_DEPTH.
  */
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  register ssize_t
    i;

  size_t
    depth;

  depth=image->depth;
  for (i=0; i < (ssize_t) (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the potential type of the image: CMYK colorspaces map to the
    color-separation types, otherwise the pixel tests run from the most
    restrictive class (bi-level) to the least (true color), with the matte
    flag selecting the *Matte variant at each step.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *pixels;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Inspect every pixel, starting from the strictest class (bi-level) and
    demoting to grayscale or undefined as counter-examples appear.  A
    cached gray/bi-level type short-circuits the scan.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (type != UndefinedType); y++)
  {
    pixels=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
      exception);
    if (pixels == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(pixels) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsPixelMonochrome(pixels) == MagickFalse))
        type=GrayscaleType;
      pixels++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    type=GrayscaleMatteType;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  register const PixelPacket
    *pixels;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Scan the pixels, giving up on the first sample that is not strictly
    monochrome.  A cached bi-level type or a non-sRGB-compatible
    colorspace decides without scanning.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (bilevel != MagickFalse); y++)
  {
    pixels=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
      exception);
    if (pixels == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(pixels) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      pixels++;
    }
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Classify the potential type of the image by inspecting its pixels.
    Unlike GetImageType(), the gray/monochrome tests here identify the
    content rather than trusting the cached image->type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report whether the cached image type is grayscale or bi-level.  Only
    the type field is consulted; pixels are not inspected (see
    IdentifyImageGray() for a pixel-level test).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleMatteType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report whether the cached image type is bi-level.  Pixels are not
    inspected (see IdentifyImageMonochrome() for a pixel-level test).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *pixels;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if the image is opaque: without a matte channel it is by
    definition; otherwise scan until a non-opaque sample (or an
    unreadable row) is found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (opaque != MagickFalse); y++)
  {
    pixels=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
      exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(pixels) != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
      pixels++;
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
/*
  SetImageDepth() sets the depth over all composite channels; it is a
  convenience wrapper around SetImageChannelDepth().
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  /*
    Reduce the effective depth of the selected channels: every sample is
    re-quantized to `depth` bits and stored back at full quantum scale.
    Returns MagickTrue on success; image->depth is updated only then.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /* at or above the native quantum depth nothing can be lost */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Colormapped image: re-quantize the colormap entries as well.
        NOTE(review): the chunking hint below uses image->rows although
        this loop runs over image->colors -- confirm intended.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].red),range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].green),range),
            range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].blue),range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].opacity),range),
            range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map): precompute the
        re-quantized value of every possible quantum once, then apply a
        table lookup per sample.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelRed(q)),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelGreen(q)),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelBlue(q)),range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelOpacity(q)),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
% PaletteMatteType, TrueColorType, TrueColorMatteType,
% ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  /*
    Coerce the image into the requested type by adjusting its colorspace,
    storage class, matte channel, and -- where the type demands a limited
    palette -- quantizing its colors.  On success image->type is updated.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* a per-image "dither" artifact overrides the image-wide setting */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* gray, contrast-stretched, then quantized down to two colors */
      status=TransformImageColorspace(image,GRAYColorspace);
      (void) NormalizeImage(image);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      /* like GrayscaleType but guarantees an (opaque) alpha channel */
      status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      /* quantize only when the image does not already fit 256 colors */
      status=TransformImageColorspace(image,sRGBColorspace);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* force alpha to pure on/off before quantizing */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
|
Graph.h | #ifndef BasicGraph
#define BasicGraph
/*
* Graph.h:
* manage nodes in a neural network model
*
* Created on: Apr 21, 2017
* Author: mszhang
*/
#include "Eigen/Dense"
#include "Node.h"
#include "MyLib.h"
using namespace Eigen;
// one Node means a vector
// the col should be 1, because we aimed for NLP only
// Computation graph: collects nodes, batches compatible ones into
// PExecute objects, runs them in topological order, and replays the
// executors in reverse for back-propagation.
class Graph {
  protected:
    vector<PExecute> execs; // executed batches (owned), replayed for backward
    vector<PNode> nodes; // nodes registered since the last clearValue()
    vector<PNode> free_nodes; // nodes whose in-degree has reached zero
    vector<PNode> finish_nodes; // nodes already executed
    vector<PNode> all_nodes; // every node added (for diagnostics)

  public:
    bool train;
    dtype drop_factor;

  public:
    // Member vectors start out empty; just establish scalar defaults.
    // (Previously `train` was left uninitialized until clearValue() ran,
    // and execs.clear() was redundantly called twice.)
    Graph() {
      train = false;
      drop_factor = 1.0;
    }

    // The graph owns every PExecute created in compute(); release them.
    virtual ~Graph() {
      int count = execs.size();
      for (int idx = 0; idx < count; idx++) {
        delete execs[idx];
      }
      execs.clear();
      nodes.clear();
      free_nodes.clear();
    }

    // Clamp the dropout scale factor into [0, 1].
    inline void setDropFactor(dtype cur_drop_factor) {
      drop_factor = cur_drop_factor;
      if (drop_factor <= 0) drop_factor = 0;
      if (drop_factor >= 1.0) drop_factor = 1.0;
    }

  public:
    // Reset the graph for a new example: free last round's executors,
    // reset per-node state, and record the train/test mode.
    inline void clearValue(const bool& bTrain = false) {
      int count = execs.size();
      for (int idx = 0; idx < count; idx++) {
        delete execs[idx];
      }
      execs.clear();
      count = nodes.size();
      for (int idx = 0; idx < count; idx++) {
        nodes[idx]->clearValue();
      }
      nodes.clear();
      free_nodes.clear();
      finish_nodes.clear();
      all_nodes.clear();
      train = bTrain;
    }

    // Back-propagate through the executors in reverse execution order.
    inline void backward() {
      int count = execs.size();
      for (int idx = count - 1; idx >= 0; idx--) {
        execs[idx]->backward();
      }
    }

    // Register a node; a node with no pending inputs is immediately ready.
    inline void addNode(PNode x) {
      nodes.push_back(x);
      if (x->degree == 0) {
        free_nodes.push_back(x);
      }
      all_nodes.push_back(x);
    }

    //real executation
    // Batched topological execution: repeatedly group all currently-free
    // nodes into compatible executors, run them, then release parents
    // whose in-degree drops to zero.
    inline void compute() {
      int free_count = free_nodes.size();
      while (free_count > 0) {
        vector<PExecute> cur_execs;
        int cur_execs_size = 0;
        for (int idx = 0; idx < free_count; idx++) {
          bool find = false;
          // try to merge this node into an existing compatible batch
          for (int idy = 0; idy < cur_execs_size; idy++) {
            if (cur_execs[idy]->addNode(free_nodes[idx])) {
              find = true;
              break;
            }
          }
          if (!find) {
            PExecute new_exec = free_nodes[idx]->generate(train, drop_factor);
            cur_execs.push_back(new_exec);
            cur_execs_size++;
          }
        }
        //execute
        //#pragma omp parallel for
        for (int idy = 0; idy < cur_execs_size; idy++) {
          cur_execs[idy]->forward();
        }
        for (int idy = 0; idy < cur_execs_size; idy++) {
          execs.push_back(cur_execs[idy]);
        }
        //finished nodes
        vector<PNode> new_free_nodes;
        for (int idx = 0; idx < free_count; idx++) {
          finish_nodes.push_back(free_nodes[idx]);
          int parent_count = free_nodes[idx]->parents.size();
          for (int idy = 0; idy < parent_count; idy++) {
            free_nodes[idx]->parents[idy]->degree--;
            if (free_nodes[idx]->parents[idy]->degree == 0) {
              new_free_nodes.push_back(free_nodes[idx]->parents[idy]);
            }
          }
        }
        // update free nodes
        free_nodes.clear();
        free_count = new_free_nodes.size();
        for (int idx = 0; idx < free_count; idx++) {
          free_nodes.push_back(new_free_nodes[idx]);
        }
      }
      // sanity check: a mismatch means the graph contains a cycle or a
      // node whose inputs were never registered
      if (finish_nodes.size() != all_nodes.size()) {
        std::cout << "error: several nodes are not executed, finished: " << finish_nodes.size() << ", all: " << all_nodes.size() << std::endl;
        int total_node_num = all_nodes.size();
        int unprocessed = 0;
        for (int idx = 0; idx < total_node_num; idx++) {
          PNode curNode = all_nodes[idx];
          if (curNode->degree >= 0) {
            curNode->typeEqual(all_nodes[0]);
            unprocessed++;
          }
        }
        std::cout << "unprocessed: " << unprocessed << std::endl;
      }
    }
};
// Collect base-node pointers for the first `size` elements of `inputs`
// (pass size < 0 to take every element).
template<typename DerivedNode>
inline vector<PNode> getPNodes(vector<DerivedNode>& inputs, int size) {
    int count = (int)inputs.size();
    if (size >= 0 && size < count) {
        count = size;
    }
    vector<PNode> pnodes;
    for (int i = 0; i < count; i++) {
        pnodes.push_back(&inputs[i]);
    }
    return pnodes;
}
// Collect base-node pointers for the first `size` elements of a raw array
// of derived nodes (the array carries no length, so `size` is trusted).
template<typename DerivedNode>
inline vector<PNode> getPNodes(DerivedNode inputs[], int size) {
    vector<PNode> pnodes;
    for (int i = 0; i < size; i++) {
        pnodes.push_back(&inputs[i]);
    }
    return pnodes;
}
// Collect base-node pointers for `length` elements of `inputs` starting at
// `start`; the range is clamped to the vector bounds.
// Fixes: the old `tmp_end > inputs.size()` compared int against size_t, so a
// negative start/length converted to a huge unsigned value and the loop then
// indexed out of bounds; such requests now yield an empty result.
template<typename DerivedNode>
inline vector<PNode> getPNodes(vector<DerivedNode>& inputs, int start, int length) {
    vector<PNode> pnodes;
    if (start < 0 || length <= 0) {
        return pnodes;  // nothing sensible to take
    }
    int end = start + length;
    if (end > (int)inputs.size()) {
        end = (int)inputs.size();  // clamp; cast avoids signed/unsigned compare
    }
    for (int idx = start; idx < end; idx++) {
        pnodes.push_back(&(inputs[idx]));
    }
    return pnodes;
}
// Collect base-node pointers for `length` elements of the raw array
// `inputs` (total extent `size`) starting at `start`; the range is clamped
// so it never runs past `size`.
template<typename DerivedNode>
inline vector<PNode> getPNodes(DerivedNode inputs[], int size, int start, int length) {
    int stop = start + length;
    if (stop > size) {
        stop = size;
    }
    vector<PNode> pnodes;
    for (int i = start; i < stop; i++) {
        pnodes.push_back(&inputs[i]);
    }
    return pnodes;
}
#endif
|
GB_binop__bxor_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int16)
// C=scalar+B GB (_bind1st__bxor_int16)
// C=scalar+B' GB (_bind1st_tran__bxor_int16)
// C=A+scalar GB (_bind2nd__bxor_int16)
// C=A'+scalar GB (_bind2nd_tran__bxor_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij) ^ (bij)
// Type and operator definitions consumed by the GB_* kernel templates below
// (C = A ^ B elementwise, all three matrices int16_t).
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: removed a stray trailing backslash that line-spliced the following
// comment line into this macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (fix: stray trailing backslash removed here as well)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// accessor for the value array of C
#define GB_CX(p) Cx [p]
// binary operator: z = x XOR y (row/col indices i,j are unused)
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: this accumulating kernel is only generated for a fixed set of
// accumulators, so for BXOR the generator emitted the placeholder "(none)"
// and compiled the body out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All work is done by the included template, specialized through the GB_*
// macros defined above (C(i,j) = A(i,j) ^ B(i,j), int16).
void GB (_Cdense_ewise3_noaccum__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C(i,j) ^= B(i,j) for every entry of B; see the included template.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above always returns); emitted by the generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no A*D colscale kernel is generated for the BXOR operator
// (placeholder name "(none)"), so this body is compiled out.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no D*B rowscale kernel is generated for the BXOR operator
// (placeholder name "(none)"), so this body is compiled out.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// "Add" is the set union of the patterns of A and B; entries present in both
// are combined with XOR.  The numeric work is done by the included template,
// driven by the GB_* macros above.
GrB_Info GB (_AaddB__bxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are consumed only by eWiseUnion (values used when an entry is
// missing from A or B, respectively)
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// "Mult" is the set intersection of the patterns of A and B; matching entries
// are combined with XOR by the included template.
GrB_Info GB (_AemultB_08__bxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for XOR (commutative), so only the #else branch below
// is compiled for this kernel.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// pattern of C follows the mask M; see the included template
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x ^ Bx [p] for every entry present in B (bind the first operand
// of the XOR to the scalar x).
GrB_Info GB (_bind1st__bxor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
    // skip positions not present in the bitmap Bb
    if (GBB (Bb, p))
    {
        int16_t b_val = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (b_val) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] ^ y for every entry present in A (bind the second operand
// of the XOR to the scalar y).
GrB_Info GB (_bind2nd__bxor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
    // skip positions not present in the bitmap Ab
    if (GBB (Ab, k))
    {
        int16_t a_val = GBX (Ax, k, false) ;
        Cx [k] = (a_val) ^ (y) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
GrB_Info GB (_bind1st_tran__bxor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (identical value here since A and C
// share the int16_t type)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
GrB_Info GB (_bind2nd_tran__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the transpose loop itself lives in the included template; it applies
// GB_CAST_OP (defined just above) to each entry
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
*/
#include "GKlib.h"
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocates and initializes a new CSR matrix object.
    \returns the allocated matrix; all fields are zeroed/NULL and the
             dimensions are set to -1 by gk_csr_Init().
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}
/*************************************************************************/
/*! Resets a CSR matrix to the empty state: every field is zeroed and the
    dimensions are marked as unset (-1).
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(gk_csr_t));
  mat->ncols = -1;
  mat->nrows = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for a matrix and NULLs the caller's
    pointer. Passing a pointer to a NULL matrix is a no-op.
    \param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
/* gk_free() takes an LTERM-terminated list of pointer addresses; fields
   that were never allocated are NULL and are safe to pass. */
gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
&mat->colptr, &mat->colind, &mat->colval, &mat->colids,
&mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
&mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
&mat->rwgts, &mat->cwgts,
LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = mat->nrows;
nmat->ncols = mat->ncols;
/* copy the row structure; each field is duplicated only if it is present.
   rowptr has nrows+1 entries, rowind/rowval have rowptr[nrows] entries. */
if (mat->rowptr)
nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
if (mat->rowids)
nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
if (mat->rnorms)
nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
if (mat->rowind)
nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
if (mat->rowval)
nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));
/* copy the col structure (mirror of the row structure above).
   NOTE(review): rsums/rsizes/rvols/rwgts and their column counterparts are
   NOT duplicated here — confirm callers do not rely on them being copied. */
if (mat->colptr)
nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
if (mat->colids)
nmat->colids = gk_icopy(mat->ncols, mat->colids,
gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
if (mat->cnorms)
nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
if (mat->colind)
nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
if (mat->colval)
nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix, or NULL if
             the requested row range is invalid.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  /* reject invalid ranges; rstart < 0 / nrows < 0 were previously
     unchecked and caused out-of-bounds accesses */
  if (rstart < 0 || nrows < 0 || rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows  = nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr) {
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
    /* renumber the copied offsets to start at zero; iterate downward so the
       base nmat->rowptr[0] is modified last.  This loop previously ran even
       when rowptr was NULL, dereferencing a NULL pointer. */
    for (i=nrows; i>=0; i--)
      nmat->rowptr[i] -= nmat->rowptr[0];
    ASSERT(nmat->rowptr[0] == 0);
  }

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);

  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
    \param mat is the original matrix.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
ssize_t i, ii, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = nrows;
nmat->ncols = mat->ncols;
/* first pass: count the nonzeros of the selected rows */
for (nnz=0, i=0; i<nrows; i++)
nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];
/* NOTE(review): the allocation tags below say "ExtractPartition" — an
   apparent copy/paste in the diagnostic strings only */
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
/* second pass: copy each selected row's indices/values contiguously */
nmat->rowptr[0] = 0;
for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
i = rind[ii];
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
ASSERT(j == nmat->nrows);
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
    \param mat is the original matrix.
    \param part is the partitioning vector of the rows.
    \param pid is the partition ID that will be extracted.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
ssize_t i, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = 0;
nmat->ncols = mat->ncols;
/* first pass: count the rows and nonzeros belonging to partition pid */
for (nnz=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
nmat->nrows++;
nnz += mat->rowptr[i+1]-mat->rowptr[i];
}
}
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
/* second pass: copy the matching rows contiguously, preserving their order */
nmat->rowptr[0] = 0;
for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
}
ASSERT(j == nmat->nrows);
return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
    color array.
    \param mat is the original matrix.
    \param color is an array of size equal to the number of non-zeros
           in the matrix (row-wise structure). The matrix is split into
           as many parts as the number of colors. For meaningful results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
ssize_t i, j;
int nrows, ncolors;
ssize_t *rowptr;
int *rowind;
float *rowval;
gk_csr_t **smats;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
/* number of output matrices = largest color + 1 */
ncolors = gk_imax(rowptr[nrows], color)+1;
smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
for (i=0; i<ncolors; i++) {
smats[i] = gk_csr_Create();
smats[i]->nrows = mat->nrows;
smats[i]->ncols = mat->ncols;
smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
}
/* pass 1: per-color, per-row nonzero counts */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
smats[color[j]]->rowptr[i]++;
}
/* MAKECSR (GKlib macro) turns the counts into CSR offsets */
for (i=0; i<ncolors; i++)
MAKECSR(j, nrows, smats[i]->rowptr);
for (i=0; i<ncolors; i++) {
smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
}
/* pass 2: scatter each nonzero into its color's matrix; the rowptr entries
   are used as running insertion cursors here... */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
smats[color[j]]->rowptr[i]++;
}
}
/* ...and SHIFTCSR (GKlib macro) undoes the cursor advance, restoring
   proper CSR offsets */
for (i=0; i<ncolors; i++)
SHIFTCSR(j, nrows, smats[i]->rowptr);
return smats;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
    forward structure.
    \param filename is the file that stores the data.
    \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
           GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
           specifying the type of the input format.
           The GK_CSR_FMT_CSR does not contain a header
           line, whereas the GK_CSR_FMT_BINROW is a binary format written
           by gk_csr_Write() using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or it does not. It only applies when GK_CSR_FMT_CSR is
           used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices start from 1 or 0, respectively. If they start from 1,
           they are automatically decreamented during input so that they
           will start from 0. It only applies when GK_CSR_FMT_CSR is
           used.
    \returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
ssize_t i, k, l;
size_t nfields, nrows, ncols, nnz, fmt, ncon;
size_t lnlen;
ssize_t *rowptr;
int *rowind, ival;
float *rowval=NULL, fval;
int readsizes, readwgts;
char *line=NULL, *head, *tail, fmtstr[256];
FILE *fpin;
gk_csr_t *mat=NULL;
if (!gk_fexists(filename))
gk_errexit(SIGERR, "File %s does not exist!\n", filename);
/* binary row-wise dump: nrows, ncols, rowptr, rowind[, rowval] */
if (format == GK_CSR_FMT_BINROW) {
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
if (readvals == 1) {
mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
}
/* binary column-wise dump: same layout but for the col structure */
if (format == GK_CSR_FMT_BINCOL) {
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
if (readvals) {
mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
}
/* text formats: parse the header (if any) to learn sizes and which
   optional per-row fields (sizes/weights/values) are present */
if (format == GK_CSR_FMT_CLUTO) {
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
gk_errexit(SIGERR, "Header line must contain 3 integers.\n");
readsizes = 0;
readwgts = 0;
readvals = 1;
numbering = 1;
}
else if (format == GK_CSR_FMT_METIS) {
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
fmt = ncon = 0;
nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
if (nfields < 2)
gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");
ncols = nrows;
/* METIS files list each undirected edge once; it appears twice in CSR */
nnz *= 2;
if (fmt > 111)
gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);
/* the 3-digit fmt field encodes sizes/weights/values flags */
sprintf(fmtstr, "%03zu", fmt%1000);
readsizes = (fmtstr[0] == '1');
readwgts = (fmtstr[1] == '1');
readvals = (fmtstr[2] == '1');
numbering = 1;
ncon = (ncon == 0 ? 1 : ncon);
}
else {
/* headerless CSR: infer nrows/nnz by scanning the file */
readsizes = 0;
readwgts = 0;
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && nnz%2 == 1)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
if (readvals == 1)
nnz = nnz/2;
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
}
mat = gk_csr_Create();
mat->nrows = nrows;
rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
if (readvals != 2)
rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");
if (readsizes)
mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");
if (readwgts)
mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");
/*----------------------------------------------------------------------
 * Read the sparse matrix file
 *---------------------------------------------------------------------*/
/* numbering becomes the offset added to every column index (-1 or 0) */
numbering = (numbering ? - 1 : 0);
for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
do {
if (gk_getline(&line, &lnlen, fpin) == -1)
gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
} while (line[0] == '%');
head = line;
tail = NULL;
/* Read vertex sizes */
if (readsizes) {
#ifdef __MSC__
mat->rsizes[i] = (float)strtod(head, &tail);
#else
mat->rsizes[i] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (mat->rsizes[i] < 0)
errexit("The size for vertex %zd must be >= 0\n", i+1);
head = tail;
}
/* Read vertex weights */
if (readwgts) {
for (l=0; l<ncon; l++) {
#ifdef __MSC__
mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
if (tail == head)
errexit("The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (mat->rwgts[i*ncon+l] < 0)
errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
head = tail;
}
}
/* Read the rest of the row: (index[, value]) pairs until the line ends */
while (1) {
ival = (int)strtol(head, &tail, 0);
if (tail == head)
break;
head = tail;
if ((rowind[k] = ival + numbering) < 0)
gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);
/* track the largest column index seen to derive ncols at the end */
ncols = gk_max(rowind[k], ncols);
if (readvals == 1) {
#ifdef __MSC__
fval = (float)strtod(head, &tail);
#else
fval = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
head = tail;
rowval[k] = fval;
}
k++;
}
rowptr[i+1] = k;
}
if (format == GK_CSR_FMT_METIS) {
/* METIS graphs are square by definition */
ASSERT(ncols+1 == mat->nrows);
mat->ncols = mat->nrows;
}
else {
mat->ncols = ncols+1;
}
if (k != nnz)
gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
"the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);
gk_fclose(fpin);
gk_free((void **)&line, LTERM);
return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  FILE *fpout;
  ssize_t r, e;

  /* Binary row-based dump: (nrows, ncols) header followed by rowptr,
     rowind, and optionally rowval. */
  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

    gk_fclose(fpout);
    return;
  }

  /* Binary column-based dump: same layout, but over the column view. */
  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

    gk_fclose(fpout);
    return;
  }

  /* Text formats go either to the named file or to stdout. */
  fpout = (filename ? gk_fopen(filename, "w", "gk_csr_Write: fpout") : stdout);

  if (format == GK_CSR_FMT_CLUTO) {
    /* CLUTO files start with a "nrows ncols nnz" header, always carry
       values, and use 1-based column numbering. */
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  /* One line per row: "col [val] col [val] ...". */
  for (r=0; r<mat->nrows; r++) {
    for (e=mat->rowptr[r]; e<mat->rowptr[r+1]; e++) {
      fprintf(fpout, " %d", mat->rowind[e]+(numbering ? 1 : 0));
      if (writevals)
        fprintf(fpout, " %f", mat->rowval[e]);
    }
    fprintf(fpout, "\n");
  }

  if (filename)
    gk_fclose(fpout);
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning is determined
by analyzing the row structure of the matrix. The pruning takes place
by removing rows/columns but it does not affect the numbering of the
remaining rows/columns.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param minf is the minimum number of rows (columns) that a column (row) must
be present in order to be kept,
\param maxf is the maximum number of rows (columns) that a column (row) must
be present at in order to be kept.
\returns the pruned matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, cnnz;
  int nrows, ncols;
  ssize_t *optr, *pptr;
  int *oind, *pind, *keep;
  float *oval, *pval;
  gk_csr_t *pmat;

  pmat = gk_csr_Create();
  nrows = pmat->nrows = mat->nrows;
  ncols = pmat->ncols = mat->ncols;

  /* o* = original row view, p* = pruned copy being built. */
  optr = mat->rowptr;
  oind = mat->rowind;
  oval = mat->rowval;

  pptr = pmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  pind = pmat->rowind = gk_imalloc(optr[nrows], "gk_csr_Prune: nrowind");
  pval = pmat->rowval = gk_fmalloc(optr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* Count how many rows each column appears in. */
      keep = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");
      for (i=0; i<nrows; i++) {
        for (j=optr[i]; j<optr[i+1]; j++) {
          ASSERT(oind[j] < ncols);
          keep[oind[j]]++;
        }
      }
      /* Turn the counts into keep(1)/drop(0) flags. */
      for (i=0; i<ncols; i++)
        keep[i] = (keep[i] >= minf && keep[i] <= maxf ? 1 : 0);

      /* Copy over only the entries whose column survived; the surviving
         columns retain their original numbers. */
      pptr[0] = 0;
      for (cnnz=0, i=0; i<nrows; i++) {
        for (j=optr[i]; j<optr[i+1]; j++) {
          if (keep[oind[j]]) {
            pind[cnnz] = oind[j];
            pval[cnnz] = oval[j];
            cnnz++;
          }
        }
        pptr[i+1] = cnnz;
      }
      gk_free((void **)&keep, LTERM);
      break;

    case GK_CSR_ROW:
      /* Keep a row only when its length lies within [minf, maxf];
         dropped rows become empty but keep their row number. */
      pptr[0] = 0;
      for (cnnz=0, i=0; i<nrows; i++) {
        if (optr[i+1]-optr[i] >= minf && optr[i+1]-optr[i] <= maxf) {
          for (j=optr[i]; j<optr[i+1]; j++, cnnz++) {
            pind[cnnz] = oind[j];
            pval[cnnz] = oval[j];
          }
        }
        pptr[i+1] = cnnz;
      }
      break;

    default:
      gk_csr_Free(&pmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return pmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
/* Keeps, per row or per column, only the heaviest entries whose accumulated
   1- or 2-norm mass reaches the requested fraction of the vector's total.
   In the COL case, nmat->rowptr is temporarily used as a per-row write
   cursor (seeded from rowptr via gk_zcopy) and is restored with SHIFTCSR
   after compaction. */
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
ssize_t i, j, nnz;
int nrows, ncols, ncand, maxlen=0;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind;
float *rowval, *colval, *nrowval, rsum, tsum;
gk_csr_t *nmat;
gk_fkv_t *cand;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
/* the filtered matrix can never have more nonzeros than the input */
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
switch (what) {
case GK_CSR_COL:
if (mat->colptr == NULL)
gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
/* seed the per-row write cursors with the original row starts */
gk_zcopy(nrows+1, rowptr, nrowptr);
/* cand must be able to hold the longest column */
for (i=0; i<ncols; i++)
maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<ncols; i++) {
/* gather column i as (value, row) pairs and its total mass */
for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
cand[ncand].val = colind[j];
cand[ncand].key = colval[j];
tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
}
/* heaviest entries first; keep until the fraction is covered */
gk_fkvsortd(ncand, cand);
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
/* scatter the kept entry into its row's cursor position;
   NOTE(review): two columns sharing a row can update
   nrowptr[row] concurrently here — looks like a data race
   under OpenMP; confirm against upstream. */
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
}
gk_free((void **)&cand, LTERM);
}
/* compact the nrowind/nrowval */
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i] = nnz;
}
/* nrowptr currently holds row ENDS; shift it back to row starts */
SHIFTCSR(i, nrows, nrowptr);
break;
case GK_CSR_ROW:
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
for (i=0; i<nrows; i++)
maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<nrows; i++) {
/* gather row i as (value, col) pairs and its total mass */
for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
cand[ncand].val = rowind[j];
cand[ncand].key = rowval[j];
tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
}
gk_fkvsortd(ncand, cand);
/* kept entries are written back at the row's original offset */
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[rowptr[i]+j] = cand[j].val;
nrowval[rowptr[i]+j] = cand[j].key;
}
nrowptr[i+1] = rowptr[i]+j;
}
gk_free((void **)&cand, LTERM);
}
/* compact nrowind/nrowval */
nrowptr[0] = nnz = 0;
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();
  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* The filtered matrix can never have more nonzeros than the input.
     (Fixed copy-pasted diagnostics that used to say "gk_csr_LowFilter".) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_TopKPlusFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_TopKPlusFilter: cand");

      /* use nrowptr as per-row write cursors, seeded with the row starts;
         they are shifted back after compaction */
      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++) {
        /* gather column i as (value, row) pairs */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* always keep the topk heaviest entries... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...plus any further entries whose weight is >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_TopKPlusFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* always keep the topk heaviest entries... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        /* ...plus any further entries whose weight is >= keepval */
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
over the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t r, e, fnnz;
  int nrows;
  ssize_t *optr, *fptr;
  int *oind, *find;
  float *oval, *fval, cutoff;
  gk_csr_t *fmat;

  fmat = gk_csr_Create();
  fmat->nrows = mat->nrows;
  fmat->ncols = mat->ncols;

  nrows = mat->nrows;
  optr  = mat->rowptr;
  oind  = mat->rowind;
  oval  = mat->rowval;

  /* the filtered matrix can never have more nonzeros than the input */
  fptr = fmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  find = fmat->rowind = gk_imalloc(optr[nrows], "gk_csr_ZScoreFilter: nrowind");
  fval = fmat->rowval = gk_fmalloc(optr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      fptr[0] = 0;
      for (fnnz=0, r=0; r<nrows; r++) {
        /* keep entries strictly greater than zscore times the average
           per-entry contribution of the (unit-length) row */
        cutoff = zscore/(optr[r+1]-optr[r]);
        for (e=optr[r]; e<optr[r+1]; e++) {
          if (oval[e] > cutoff) {
            find[fnnz] = oind[e];
            fval[fnnz] = oval[e];
            fnnz++;
          }
        }
        fptr[r+1] = fnnz;
      }
      break;

    default:
      gk_csr_Free(&fmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return fmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t e;
  int nrows, ncols, nkept;
  ssize_t *rowptr;
  int *rowind, *remap;
  gk_ikv_t *freqs;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  remap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
  freqs = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");

  /* count the number of nonzeros in each column */
  for (e=0; e<ncols; e++) {
    freqs[e].key = 0;
    freqs[e].val = e;
  }
  for (e=0; e<rowptr[nrows]; e++)
    freqs[rowind[e]].key++;

  /* sort by decreasing frequency; a surviving column's new id is its rank.
     remap is left unset for empty columns, which never occur in rowind. */
  gk_ikvsortd(ncols, freqs);
  for (nkept=0, e=0; e<ncols; e++) {
    if (freqs[e].key > 0)
      remap[freqs[e].val] = nkept++;
    else
      break;  /* everything after this point is empty */
  }

  /* renumber the column indices in place */
  for (e=0; e<rowptr[nrows]; e++)
    rowind[e] = remap[rowind[e]];
  mat->ncols = nkept;

  gk_free((void **)&remap, &freqs, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
/* Sorts, in place, the indices of each row (or column) in increasing order,
   permuting the associated values accordingly. */
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
int n, nn=0;
ssize_t *ptr;
int *ind;
float *val;
/* select the view (row or column) whose index lists will be sorted */
switch (what) {
case GK_CSR_ROW:
if (!mat->rowptr)
gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
n = mat->nrows;
ptr = mat->rowptr;
ind = mat->rowind;
val = mat->rowval;
break;
case GK_CSR_COL:
if (!mat->colptr)
gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
n = mat->ncols;
ptr = mat->colptr;
ind = mat->colind;
val = mat->colval;
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
#pragma omp parallel if (n > 100)
{
ssize_t i, j, k;
gk_ikv_t *cand;
float *tval;
/* nn (shared) = longest row/column; computed once, the single's implicit
   barrier makes it visible to all threads before the allocations below */
#pragma omp single
for (i=0; i<n; i++)
nn = gk_max(nn, ptr[i+1]-ptr[i]);
/* per-thread scratch buffers */
cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
#pragma omp for schedule(static)
for (i=0; i<n; i++) {
/* copy the list into scratch while checking whether it is already sorted */
for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
if (j > ptr[i] && ind[j] < ind[j-1])
k = 1; /* an inversion */
cand[j-ptr[i]].val = j-ptr[i];
cand[j-ptr[i]].key = ind[j];
tval[j-ptr[i]] = val[j];
}
/* only sort and write back when an inversion was seen */
if (k) {
gk_ikvsorti(ptr[i+1]-ptr[i], cand);
for (j=ptr[i]; j<ptr[i+1]; j++) {
ind[j] = cand[j-ptr[i]].key;
/* cand[].val is the original local position, used to permute values */
val[j] = tval[cand[j-ptr[i]].val];
}
}
}
gk_free((void **)&cand, &tval, LTERM);
}
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
*/
/**************************************************************************/
/* Builds the row view from the column view or vice versa by a counting
   (bucket) transpose of the CSR structure. */
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
/* 'f' stands for forward, 'r' stands for reverse */
ssize_t i, j, k, nf, nr;
ssize_t *fptr, *rptr;
int *find, *rind;
float *fval, *rval;
switch (what) {
case GK_CSR_COL:
/* forward = existing row view; reverse = column view being (re)built */
nf = mat->nrows;
fptr = mat->rowptr;
find = mat->rowind;
fval = mat->rowval;
/* discard any stale column view */
if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
if (mat->colind) gk_free((void **)&mat->colind, LTERM);
if (mat->colval) gk_free((void **)&mat->colval, LTERM);
nr = mat->ncols;
rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
/* values are only carried over when the forward view has them */
rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
case GK_CSR_ROW:
/* forward = existing column view; reverse = row view being (re)built */
nf = mat->ncols;
fptr = mat->colptr;
find = mat->colind;
fval = mat->colval;
if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);
nr = mat->nrows;
rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
/* histogram: rptr[k] = number of entries in reverse list k */
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rptr[find[j]]++;
}
/* turn counts into starting offsets; rptr is then used as write cursors
   and restored afterwards with SHIFTCSR */
MAKECSR(i, nr, rptr);
/* NOTE(review): dense matrices (avg list length > 6) take a two-pass
   scatter (indices, then values); presumably chosen for cache behavior —
   the threshold's origin is not evident from this code. */
if (rptr[nr] > 6*nr) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
SHIFTCSR(i, nr, rptr);
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rval[rptr[find[j]]++] = fval[j];
}
SHIFTCSR(i, nr, rptr);
}
}
else {
/* sparse case: scatter indices and values in a single pass */
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++) {
k = find[j];
rind[rptr[k]] = i;
rval[rptr[k]++] = fval[j];
}
}
}
else {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
}
SHIFTCSR(i, nr, rptr);
}
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
length.
\param mat the matrix itself,
\param what indicates what will be normalized and is obtained by
specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
/* Scales each row and/or column so that its 1- or 2-norm becomes one;
   vectors whose norm is zero are left untouched. */
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
ssize_t i, j;
int n;
ssize_t *ptr;
float *val, sum;
/* row view (only if values are present) */
if (what&GK_CSR_ROW && mat->rowval) {
n = mat->nrows;
ptr = mat->rowptr;
val = mat->rowval;
#pragma omp parallel if (ptr[n] > OMPMINOPS)
{
#pragma omp for private(j,sum) schedule(static)
for (i=0; i<n; i++) {
/* accumulate the row's 1- or 2-norm mass */
for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
if (norm == 2)
sum += val[j]*val[j];
else if (norm == 1)
sum += val[j]; /* assume val[j] > 0 */
}
/* scale by the reciprocal norm; skip zero-norm rows */
if (sum > 0) {
if (norm == 2)
sum=1.0/sqrt(sum);
else if (norm == 1)
sum=1.0/sum;
for (j=ptr[i]; j<ptr[i+1]; j++)
val[j] *= sum;
}
}
}
}
/* column view: same computation over colptr/colval */
if (what&GK_CSR_COL && mat->colval) {
n = mat->ncols;
ptr = mat->colptr;
val = mat->colval;
#pragma omp parallel if (ptr[n] > OMPMINOPS)
{
#pragma omp for private(j,sum) schedule(static)
for (i=0; i<n; i++) {
for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
if (norm == 2)
sum += val[j]*val[j];
else if (norm == 1)
sum += val[j];
if (sum > 0) {
if (norm == 2)
sum=1.0/sqrt(sum);
else if (norm == 1)
sum=1.0/sum;
for (j=ptr[i]; j<ptr[i+1]; j++)
val[j] *= sum;
}
}
}
}
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
/* Applies a term-frequency scaling scheme to the row values of the matrix;
   the scheme is selected by 'type' (see the case labels for the formulas). */
void gk_csr_Scale(gk_csr_t *mat, int type)
{
ssize_t i, j;
int nrows, ncols, nnzcols, bgfreq;
ssize_t *rowptr;
int *rowind, *collen;
float *rowval, *cscale, maxtf;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
switch (type) {
case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j, maxtf) schedule(static)
for (i=0; i<nrows; i++) {
/* maxtf = largest absolute value in the row */
maxtf = fabs(rowval[rowptr[i]]);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] = .5 + .5*rowval[j]/maxtf;
}
}
break;
case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j, maxtf) schedule(static)
for (i=0; i<nrows; i++) {
maxtf = fabs(rowval[rowptr[i]]);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] = .1 + .9*rowval[j]/maxtf;
}
}
break;
case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
/* sign() keeps the original sign while scaling the magnitude */
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
}
}
}
break;
case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
}
}
}
break;
case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
}
}
}
break;
case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
}
}
}
break;
case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
}
}
}
break;
case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
/* log(x)/log(2) == log2(x); precompute the 1/log(2) factor */
double logscale = 1.0/log(2.0);
#pragma omp for schedule(static,32)
for (i=0; i<rowptr[nrows]; i++) {
if (rowval[i] != 0.0)
rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
}
/* retained (disabled) per-row variant of the same scaling */
#ifdef XXX
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
//rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
}
}
#endif
}
break;
case GK_CSR_IDF: /* TF' = TF*IDF */
ncols = mat->ncols;
cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
/* collen[c] = number of rows (documents) containing column (term) c */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
collen[rowind[j]]++;
}
#pragma omp parallel if (ncols > OMPMINOPS)
{
/* cscale[c] = log(nrows/df(c)); empty columns get 0 */
#pragma omp for schedule(static)
for (i=0; i<ncols; i++)
cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
}
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] *= cscale[rowind[j]];
}
}
gk_free((void **)&cscale, &collen, LTERM);
break;
case GK_CSR_IDF2: /* TF' = TF*IDF */
ncols = mat->ncols;
cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
collen[rowind[j]]++;
}
nnzcols = 0;
#pragma omp parallel if (ncols > OMPMINOPS)
{
#pragma omp for schedule(static) reduction(+:nnzcols)
for (i=0; i<ncols; i++)
nnzcols += (collen[i] > 0 ? 1 : 0);
/* NOTE(review): the two statements below sit inside the parallel region
   but outside any worksharing construct, so every thread executes them:
   bgfreq is written by all threads (with the same value) and the printf
   is emitted once per thread — looks unintended; confirm upstream. */
bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
/* smoothed IDF using bgfreq as a background frequency prior */
#pragma omp for schedule(static)
for (i=0; i<ncols; i++)
cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
}
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] *= cscale[rowind[j]];
}
}
gk_free((void **)&cscale, &collen, LTERM);
break;
default:
gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
}
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t r;
  int count;
  ssize_t *offsets;
  float *weights, *sums;

  /* Pick the requested view and (re)allocate its sums array. */
  if (what == GK_CSR_ROW) {
    count   = mat->nrows;
    offsets = mat->rowptr;
    weights = mat->rowval;
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    sums = mat->rsums = gk_fsmalloc(count, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    count   = mat->ncols;
    offsets = mat->colptr;
    weights = mat->colval;
    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    sums = mat->csums = gk_fsmalloc(count, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  /* Each vector's sum is independent, so the loop parallelizes trivially. */
  #pragma omp parallel for if (offsets[count] > OMPMINOPS) schedule(static)
  for (r=0; r<count; r++)
    sums[r] = gk_fsum(offsets[r+1]-offsets[r], weights+offsets[r], 1);
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  /* Pick the requested view and (re)allocate its squared-norms array.
     (Fixed copy-pasted allocation messages that said "gk_csr_ComputeSums".) */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;
      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;
      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);
      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* ||v||^2 via a self dot-product; rows/columns are independent. */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* Select the row- or column-view of the two vectors being compared. */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;
    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* i1/i2 are reused below as merge cursors over the two sorted index lists. */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* BUGFIX: this loop used to run while (i1<nind1 && i2<nind2), which
         made the two exhaustion branches below unreachable and silently
         dropped the tail of the longer vector from stat1/stat2, skewing
         the cosine/Jaccard denominators.  '||' drains both lists. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {            /* vector 1 exhausted: norm-only */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {       /* vector 2 exhausted: norm-only */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {                        /* matching index: dot-product term */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else  /* extended Jaccard: dot/(|v1|^2+|v2|^2-dot) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* BUGFIX: '&&' -> '||', same reason as above (tail sums were lost). */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* BUGFIX: '&&' -> '||'; the asymmetric denominator stat1 must cover
         all of vector 1, including its unmatched tail. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* asymmetric: normalize by vector 1's mass only */
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
similarity.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
\param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns the number of identified most similar rows, which can be
smaller than the requested number of nnbrs in those cases
in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
        float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
        int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* an empty query matches nothing */
  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* marker[row] holds the row's slot in cand[] (-1 when the row is not yet a
     candidate); both scratch arrays are allocated here unless the caller
     supplied them via i_marker/i_cand */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* accumulate the dot product of the query with every row that shares
         at least one column with it (inverted-index traversal) */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      /* same dot-product accumulation as the cosine case... */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* ...then normalize into extended Jaccard: dot/(||r||^2+||q||^2-dot);
         NOTE(review): assumes mat->rnorms was precomputed by the caller */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      /* accumulate sum of element-wise minimums with each candidate row */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* normalize by the "union" mass; NOTE(review): assumes mat->rsums
         was precomputed by the caller */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity: normalized only by the query's mass */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* prune the hits that are below minsim, compacting cand[] in place;
     this pass also resets marker[] to -1 so the scratch array is reusable */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;   /* return everything, unsorted */
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);  /* partial select of the top nsim... */
    gk_fkvsortd(nsim, cand);            /* ...then sort them in decreasing order */
  }

  gk_fkvcopy(nsim, cand, hits);

  /* free only what this function allocated itself */
  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
|
sharing-1.c | /* { dg-do compile } */
/* { dg-require-effective-target tls } */
#include <stdlib.h>
int thrglobalvar;
#pragma omp threadprivate (thrglobalvar)
int globalvar;
const int constvar = 8;
/* Identity helper: forces a "use" of its argument so the data-sharing
   diagnostics below have something to attach to.  */
int
foo (int x)
{
  return x;
}
/* Dereferencing helper: forces a use of the pointer value itself,
   as opposed to the pointed-to storage.  */
int
bar (int *x)
{
  return *x;
}
/* Compile-only test of OpenMP default(none) data-sharing rules: every
   variable referenced in the construct must either have a predetermined
   sharing attribute or appear in an explicit clause; the dg-error
   annotations mark the references that must be diagnosed.  */
int
main (void)
{
  static int thrlocvar;
#pragma omp threadprivate (thrlocvar)
  static int locvar;
  static int *p;
  int i, j, s, l;

  p = malloc (sizeof (int));
  if (p == NULL)
    return 0;
  *p = 7;
  s = 6;
  l = 0;
#pragma omp parallel for /* { dg-error "enclosing 'parallel'" } */ \
    default (none) private (p) shared (s)
  for (i = 0; i < 64; i++)
    {
      int k = foo (0);  /* Predetermined - private (automatic var declared */
      k++;              /* in scope of construct).  */
      thrglobalvar++;   /* Predetermined - threadprivate.  */
      thrlocvar++;      /* Predetermined - threadprivate.  */
      foo (i);          /* Predetermined - private (omp for loop variable).  */
      foo (constvar);   /* Predetermined - shared (const qualified type).  */
      foo (*p);         /* *p predetermined - shared (heap allocated */
      (*p)++;           /* storage).  */
      bar (p);          /* Explicitly determined - private.  */
      foo (s);          /* Explicitly determined - shared.  */
      globalvar++;      /* { dg-error "not specified in" } */
      locvar++;         /* { dg-error "not specified in" } */
      l++;              /* { dg-error "not specified in" } */
      for (j = 0; j < 2; j++); /* { dg-error "not specified in" } */
    }
  return 0;
}
|
block-3.c | // { dg-do compile }
extern int test(int);
/* Compile-only test of the rule that no branch may enter or leave an OpenMP
   structured block.  Each dg-error line marks an invalid continue/goto;
   the final sections group demonstrates the branches that remain legal
   (a goto fully contained in the block, break/continue of a nested loop,
   and a switch).  */
void foo()
{
  int i;

  for (i = 0; i < 10; ++i)
    {
      #pragma omp sections
      {
        continue;       // { dg-error "invalid branch to/from OpenMP structured block" }
      }
    }

  #pragma omp sections
  {
    #pragma omp section
    { bad1: ; }
    #pragma omp section
    goto bad1;          // { dg-error "invalid branch to/from OpenMP structured block" }
  }

  #pragma omp sections
  {
    goto bad2;          // { dg-error "invalid branch to/from OpenMP structured block" }
  }
  bad2:;

  goto bad3;            // { dg-error "invalid entry to OpenMP structured block" }
  #pragma omp sections
  {
    bad3: ;
  }

  #pragma omp sections
  {
    {
      goto ok1;
      ok1:;
    }
    #pragma omp section
    for (i = 0; i < 10; ++i)
      if (test(i))
        break;
      else
        continue;
    #pragma omp section
    switch (i)
      {
      case 0:
        break;
      default:
        test(i);
      }
  }
}
|
GB_unaryop__identity_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_int64
// op(A') function: GB_tran__identity_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = op (cast (Ax [p])), for the
// int64 identity operator (see the GB_* macro definitions above).
GrB_Info GB_unop__identity_int64_int64
(
    int64_t *restrict Cx,           // output array, anz entries
    const int64_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;         // operator compiled out via GB_control.h
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;         // Cx [p] = op (cast (Ax [p]))
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast, and apply the unary operator.  The loop body comes
// from the shared template GB_unaryop_transpose.c (phase 2 of 2), specialized
// by the GB_* macros defined above.
GrB_Info GB_tran__identity_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;         // operator compiled out via GB_control.h
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__abs_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_fp64
// op(A') function: GB_tran__abs_int16_fp64
// C type: int16_t
// A type: double
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z ; GB_CAST_SIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = op (cast (Ax [p])), for the
// abs operator with double input cast to int16 (see the GB_* macros above).
GrB_Info GB_unop__abs_int16_fp64
(
    int16_t *restrict Cx,           // output array, anz entries
    const double *restrict Ax,      // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;         // operator compiled out via GB_control.h
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;         // Cx [p] = op (cast (Ax [p]))
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast, and apply the unary operator.  The loop body comes
// from the shared template GB_unaryop_transpose.c (phase 2 of 2), specialized
// by the GB_* macros defined above.
GrB_Info GB_tran__abs_int16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;         // operator compiled out via GB_control.h
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
17_omp_sharing_semantics.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
#include "omp.h"
extern void MPI_Send(void*, int);
/* Instrumentation test: with firstprivate, the outlined region gets its own
   copy of d, so only that copy is tracked (check-inst lines below are the
   FileCheck expectations and must not be altered). */
void foo(int count) {
  // firstprivate > every thread has a private copy (d.addr) of d (which is passed to the outlined region for copying)
  // check-inst: define {{.*}} @foo
  // check-inst-NOT: call void @__typeart_alloc_stack
  int d = 3;
  int e = 4;
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: %d.addr = alloca i64, align 8
  // check-inst-NEXT: %0 = bitcast i64* %d.addr to i8*
  // check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 3, i64 1)
#pragma omp parallel for schedule(dynamic, 1) firstprivate(d) shared(e)
  for (int i = 0; i < count; ++i) {
    // Analysis should not filter d, but e...
    MPI_Send((void*)&d, e);
  }
}
/* Instrumentation test: with lastprivate, only the private copy inside the
   outlined region is tracked; the original d in bar is not. */
void bar(int count) {
  // lastprivate - value of d is copied to "private_val" (which is tracked) in outlined region and thus not tracked.
  // --> see "void bar2()" for different scenario with tracking of "d"
  // check-inst: define {{.*}} @bar
  // check-inst-NOT: call void @__typeart_alloc_stack
  int d = 3;
  int e = 4;
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: %d{{[0-9]}} = alloca i32
  // check-inst-NEXT: %0 = bitcast i32* %d{{[0-9]}} to i8*
  // check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 2, i64 1)
#pragma omp parallel for schedule(dynamic, 1) lastprivate(d) shared(e)
  for (int i = 0; i < count; ++i) {
    // Analysis should not filter d, but e...
    MPI_Send((void*)&d, e);
  }
}
/* Instrumentation test: unlike bar, the outer d escapes via the trailing
   MPI_Send, so both the outer d and the outlined region's private copy
   are tracked. */
void bar2(int count) {
  // check-inst: define {{.*}} @bar2
  // check-inst: %d = alloca
  // check-inst-NEXT: %0 = bitcast i32* %d to i8*
  // check-inst-NEXT: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  int d = 3;
  int e = 4;
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: %d{{[0-9]}} = alloca i32
  // check-inst-NEXT: %0 = bitcast i32* %d{{[0-9]}} to i8*
  // check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 2, i64 1)
#pragma omp parallel for schedule(dynamic, 1) lastprivate(d) shared(e)
  for (int i = 0; i < count; ++i) {
    // Analysis should not filter d, but e...
    MPI_Send((void*)&d, e);
  }
  MPI_Send((void*)&d, e);
}
/* Instrumentation test: with private, the outer d and e are never passed to
   the outlined region; only the region's fresh allocas are tracked. */
void foo_bar(int count) {
  // private: d, e are "randomly" initialised values inside outlined region (outer d,e are not passed)
  // check-inst: define {{.*}} @foo_bar
  // check-inst-NOT: call void @__typeart_alloc_stack
  int d = 3;
  int e = 4;
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: %d = alloca
  // check-inst-NEXT: %0 = bitcast i32* %d to i8*
  // check-inst-NEXT: call void @__typeart_alloc_stack_omp(i8* %0, i32 2, i64 1)
#pragma omp parallel for schedule(dynamic, 1) private(d, e)
  for (int i = 0; i < count; ++i) {
    MPI_Send((void*)&d, e);
  }
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 5
// CHECK-NEXT: Global : 0
|
fun_p.c | /**********************************************************************************************************
* AC - OpenMP -- PARALELA *
* Rutinas que se utilizan en el modulo gengrupos_s.c *
* fun_p.c *
**********************************************************************************************************/
#include <math.h>
#include <float.h>
#include <omp.h>
#include "defineg.h" // Definiciones
/**********************************************************************************************************
* 1 - Funcion para calcular la distancia vserie entre dos elementos (distancia euclidea) *
* Entrada: 2 elementos con NCAR caracteristicas (por referencia) *
* Salida: distancia (double) *
**********************************************************************************************************/
/* Euclidean distance between two elements of NCAR features each.
   Inputs:  elem1, elem2 - feature vectors (NCAR floats, by reference)
   Returns: the Euclidean distance (double).
   res * res replaces pow(res, 2): identical result without a libm call
   inside this hot inner loop. */
double gendist (float *elem1, float *elem2) {
    int i;
    double acum = 0;

    for (i = 0; i < NCAR; i++) {
        double res = elem1[i] - elem2[i];
        acum += res * res;
    }
    return sqrt(acum);
}
/**********************************************************************************************************
* 2 - Funcion para calcular el grupo (cluster) mas cercano (centroide mas cercano) *
* Entrada: nelem numero de elementos, int *
* elem elementos, una matriz de tamano MAXE x NCAR, por referencia *
* cent centroides, una matriz de tamano NGRUPOS x NCAR, por referencia *
* Salida: popul grupo mas cercano a cada elemento, vector de tamano MAXE, por referencia *
**********************************************************************************************************/
/* For each of the nelem elements, find the index of the closest centroid.
   Inputs:  nelem - number of elements
            elem  - element matrix (MAXE x NCAR, by reference)
            cent  - centroid matrix (NGRUPOS x NCAR, by reference)
   Output:  popul - closest group per element (MAXE ints, by reference).
   Iterations over i are independent, so the outer loop is parallelized with
   all per-element temporaries private. */
void grupo_cercano (int nelem, float elem[][NCAR], float cent[][NCAR], int *popul) {
    int ngrupo, i, j;
    double adis, dmin;

    #pragma omp parallel for private(i, j, adis, dmin, ngrupo) schedule(dynamic,2) num_threads(32)
    for (i = 0; i < nelem; i++) {
        dmin = DBL_MAX;
        ngrupo = 0;  /* fix: guarantee a defined group even if every distance
                        compares false (e.g. NaN), avoiding an uninitialized read */
        for (j = 0; j < NGRUPOS; j++) {
            adis = gendist(elem[i], cent[j]);  /* elem[i] or &elem[i][0] */
            if (adis < dmin) {
                dmin = adis;
                ngrupo = j;
            }
        }
        popul[i] = ngrupo;
    }
}
/**********************************************************************************************************
* 3 - Funcion para calcular la densidad del grupo (dist. media entre todos sus elementos) *
* Entrada: elem elementos, una matriz de tamano MAXE x NCAR, por referencia *
* listag vector de NGRUPOS structs (informacion de grupos generados), por ref. *
* Salida: densidad densidad de los grupos (vector de tamano NGRUPOS, por referencia) *
**********************************************************************************************************/
/* Density of each group = mean pairwise distance among its members.
   Inputs:  elem   - element matrix (MAXE x NCAR, by reference)
            listag - per-group membership info (NGRUPOS structs, by reference)
   Output:  densidad - one density value per group (NGRUPOS floats). */
void calcular_densidad (float elem[][NCAR], struct lista_grupos *listag, float *densidad) {
    int i, j, k, nelem, actg, othg;
    double acum, cont;

    for (i = 0; i < NGRUPOS; i++) {
        nelem = listag[i].nelemg;
        if (nelem < 2) {
            /* fewer than two members: no pairs exist, define density as 0 */
            densidad[i] = 0;
        }
        else {
            acum = 0.0;
            cont = 0.0;
            /* sum distances over all unordered pairs (j,k), j<k; acum and
               cont are OpenMP reductions so the parallel loop is race-free */
            #pragma omp parallel for private(j, k, actg, othg) reduction(+ : acum, cont) schedule(dynamic,2) num_threads(32)
            for (j = 0; j < nelem; j++) {
                actg = listag[i].elemg[j];
                for (k = j + 1; k < nelem; k++) {
                    othg = listag[i].elemg[k];
                    acum += gendist(elem[actg], elem[othg]);
                    cont += 1.0;
                }
            }
            densidad[i] = (float) (acum / cont);
        }
    }
}
/**********************************************************************************************************
* 4 - Funcion para relizar el analisis de enfermedades *
* Entrada: listag vector de NGRUPOS structs (informacion de grupos generados), por ref. *
* enf enfermedades, una matriz de tamano MAXE x TENF, por referencia *
* Salida: prob_enf vector de TENF structs (informacion del analisis realizado), por ref. *
**********************************************************************************************************/
/* For each of the TENF diseases, find the groups with the lowest and highest
   mean disease value.
   Inputs:  listag - per-group membership info (NGRUPOS structs, by reference)
            enf    - disease matrix (MAXE x TENF, by reference)
   Output:  prob_enf - min/max mean and their group indices per disease.
   Fixes versus the previous version:
   - mediamax starts at -FLT_MAX, not FLT_MIN (FLT_MIN is the smallest
     POSITIVE float, so non-positive means were never tracked correctly);
   - the min and max updates are independent ifs, so a single group (e.g.
     when it is the first or only one) can be both the min and the max;
   - gmin/gmax are initialized, and empty groups are skipped to avoid a
     0/0 division. */
void analizar_enfermedades (struct lista_grupos *listag, float enf[][TENF], struct analisis *prob_enf) {
    int i, j, k, actg, nelem, gmax, gmin;
    float mediaact, acum, mediamin, mediamax;

    for (i = 0; i < TENF; i++) {
        mediamin = FLT_MAX;
        mediamax = -FLT_MAX;
        gmin = gmax = 0;
        for (j = 0; j < NGRUPOS; j++) {
            nelem = listag[j].nelemg;
            if (nelem == 0)
                continue;  /* empty group: no mean to compute */
            acum = 0;
            /* sum this group's values for disease i; acum is a reduction */
            #pragma omp parallel for private(k, actg) reduction(+ : acum) schedule(static) num_threads(2)
            for (k = 0; k < nelem; k++) {
                actg = listag[j].elemg[k];
                acum += enf[actg][i];
            }
            mediaact = acum / nelem;
            if (mediaact < mediamin) {
                mediamin = mediaact;
                gmin = j;
            }
            if (mediaact >= mediamax) {
                mediamax = mediaact;
                gmax = j;
            }
        }
        prob_enf[i].max = mediamax;
        prob_enf[i].min = mediamin;
        prob_enf[i].gmax = gmax;
        prob_enf[i].gmin = gmin;
    }
}
plane.h | #ifndef batoid_plane_h
#define batoid_plane_h
#include "surface.h"
namespace batoid {
#if defined(BATOID_GPU)
#pragma omp declare target
#endif
// Concrete Surface describing a plane.
class Plane : public Surface {
public:
    Plane();
    ~Plane();

    // Pointer to the device-resident copy of this surface (used by the GPU
    // builds guarded with BATOID_GPU around this class).
    virtual const Surface* getDevPtr() const override;

    // Surface height z = sag(x, y).
    virtual double sag(double x, double y) const override;

    // Surface normal (nx, ny, nz) at (x, y), returned via out-parameters.
    virtual void normal(
        double x, double y,
        double& nx, double& ny, double& nz
    ) const override;

    // Time dt until a ray at (x, y, z) with velocity (vx, vy, vz) reaches the
    // surface; presumably returns false when there is no intersection —
    // confirm against the implementation in plane.cpp.
    virtual bool timeToIntersect(
        double x, double y, double z,
        double vx, double vy, double vz,
        double& dt
    ) const override;
};
#if defined(BATOID_GPU)
#pragma omp end declare target
#endif
}
#endif
|
es_median_filter.c | /* This file was taken from modified dcraw published by Paul Lee
on January 23, 2009, taking dcraw ver.8.90/rev.1.417
as basis.
http://sites.google.com/site/demosaicalgorithms/modified-dcraw
As modified dcraw source code was published, the release under
GPL Version 2 or later option could be applied, so this file
is taken under this premise.
*/
/*
Copyright (C) 2009 Paul Lee
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
Refinement process on demosaiced image with
edge-sensitive median filter and pattern recognition based update
Reference: Hybrid color filter array demosaicking for effective
artifact suppression, L. Chang and Y-P Tan, Journal of Electronic
Imaging 15(1) 013003 (Jan-Mar 2006)
*/
#define PIX_SORT(a,b) { if ((a)>(b)) {temp=(a);(a)=(b);(b)=temp;} }
/* Refine the demosaiced image in place: for es_med_passes iterations, median
   filter the R-G and B-G differential color planes, classify pixels as edge
   or smooth with a 3x3 Laplacian, and rebuild R, G, B at smooth pixels from
   the filtered differentials (edge pixels, flagged -9999999, are left alone).
   Operates on the globals image/width/height; see the reference cited in the
   file header (Chang & Tan, JEI 2006). */
void CLASS es_median_filter()
{
  ushort (*pix)[4];
  int (*mf)[3], (*pc)[3], p[25], indx, row, col, c, d, temp, i, j;
  int v0, v1, v2, edge_cnt, smooth_cnt, w1, w2;
  int dC0, dC1, dC2, dC3, dC4, pass;
  int p00, p01, p02, p03, p04, p05, p06, p07, p08, p09,
      p15, p16, p17, p18, p19, p20, p21, p22, p23, p24;
  double EA, T=1280;

  /* precomputed offsets of the 5x5 neighborhood in the row-major image */
  w1 = width;
  w2 = 2*w1;
  p00 = -w2-2; p01 = -w2-1; p02 = -w2  ; p03 = -w2+1; p04 = -w2+2;
  p05 = -w1-2; p06 = -w1-1; p07 = -w1  ; p08 = -w1+1; p09 = -w1+2;
  p15 =  w1-2; p16 =  w1-1; p17 =  w1  ; p18 =  w1+1; p19 =  w1+2;
  p20 =  w2-2; p21 =  w2-1; p22 =  w2  ; p23 =  w2+1; p24 =  w2+2;

  /* Allocate buffer for 3x3 median filter */
  mf = (int (*)[3])calloc(width*height, sizeof *mf);

  for (pass=1; pass <= es_med_passes; pass++) {
#ifdef DCRAW_VERBOSE
    if (verbose)
      fprintf (stderr,_("Edge-sensitive median filter pass %d...\n"), pass);
#endif
    /* c = 0 (red) and c = 2 (blue); mf[][c] holds the c-G differential plane,
       mf[][1] holds the 5x5 median of that plane */
    for (c=0; c < 3; c+=2) {
#ifdef DCRAW_VERBOSE
      if (verbose) {
        if (c == 0)
          fprintf (stderr,_("\tR-G: 5x5 median filter + 3x3 Laplacian..."));
        else
          fprintf (stderr,_("\tB-G: 5x5 median filter + 3x3 Laplacian...")); }
#endif
      /* Compute differential color plane */
      for (indx=0; indx < height*width; indx++)
        mf[indx][c] = image[indx][c] - image[indx][1];

      /* Apply 3x3 median filter */
      /* for (row=1; row < height-1; row++) */
      /*   for (col=1; col < width-1; col++) { */
      /*     pc = mf + row*width+col; */
      /*     /\* Assign 3x3 differential color values *\/ */
      /*     p[0] = pc[p06][c]; p[1] = pc[p07][c]; p[2] = pc[p08][c]; */
      /*     p[3] = pc[ -1][c]; p[4] = pc[  0][c]; p[5] = pc[  1][c]; */
      /*     p[6] = pc[p16][c]; p[7] = pc[p17][c]; p[8] = pc[p18][c]; */
      /*     /\* Sort for median of 9 values *\/ */
      /*     PIX_SORT(p[1],p[2]); PIX_SORT(p[4], p[5]); PIX_SORT(p[7],p[8]); */
      /*     PIX_SORT(p[0],p[1]); PIX_SORT(p[3], p[4]); PIX_SORT(p[6],p[7]); */
      /*     PIX_SORT(p[1],p[2]); PIX_SORT(p[4], p[5]); PIX_SORT(p[7],p[8]); */
      /*     PIX_SORT(p[0],p[3]); PIX_SORT(p[5], p[8]); PIX_SORT(p[4],p[7]); */
      /*     PIX_SORT(p[3],p[6]); PIX_SORT(p[1], p[4]); PIX_SORT(p[2],p[5]); */
      /*     PIX_SORT(p[4],p[7]); PIX_SORT(p[4], p[2]); PIX_SORT(p[6],p[4]); */
      /*     PIX_SORT(p[4],p[2]); */
      /*     pc[0][1] = p[4]; */
      /*   } */

      /* Apply 5x5 median filter */
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for private(row,col,pc,p)
#endif
      for (row=2; row < height-2; row++)
        for (col=2; col < width-2; col++) {
          pc = mf + row*width+col;
          /* Assign 5x5 differential color values */
          p[ 0] = pc[p00][c]; p[ 1] = pc[p01][c]; p[ 2] = pc[p02][c];
          p[ 3] = pc[p03][c]; p[ 4] = pc[p04][c]; p[ 5] = pc[p05][c];
          p[ 6] = pc[p06][c]; p[ 7] = pc[p07][c]; p[ 8] = pc[p08][c];
          p[ 9] = pc[p09][c]; p[10] = pc[ -2][c]; p[11] = pc[ -1][c];
          p[12] = pc[  0][c]; p[13] = pc[  1][c]; p[14] = pc[  2][c];
          p[15] = pc[p15][c]; p[16] = pc[p16][c]; p[17] = pc[p17][c];
          p[18] = pc[p18][c]; p[19] = pc[p19][c]; p[20] = pc[p20][c];
          p[21] = pc[p21][c]; p[22] = pc[p22][c]; p[23] = pc[p23][c];
          p[24] = pc[p24][c];
          /* Sort for median of 25 values (fixed sorting network) */
          PIX_SORT(p[ 0],p[ 1]); PIX_SORT(p[ 3],p[ 4]); PIX_SORT(p[ 2],p[ 4]);
          PIX_SORT(p[ 2],p[ 3]); PIX_SORT(p[ 6],p[ 7]); PIX_SORT(p[ 5],p[ 7]);
          PIX_SORT(p[ 5],p[ 6]); PIX_SORT(p[ 9],p[10]); PIX_SORT(p[ 8],p[10]);
          PIX_SORT(p[ 8],p[ 9]); PIX_SORT(p[12],p[13]); PIX_SORT(p[11],p[13]);
          PIX_SORT(p[11],p[12]); PIX_SORT(p[15],p[16]); PIX_SORT(p[14],p[16]);
          PIX_SORT(p[14],p[15]); PIX_SORT(p[18],p[19]); PIX_SORT(p[17],p[19]);
          PIX_SORT(p[17],p[18]); PIX_SORT(p[21],p[22]); PIX_SORT(p[20],p[22]);
          PIX_SORT(p[20],p[21]); PIX_SORT(p[23],p[24]); PIX_SORT(p[ 2],p[ 5]);
          PIX_SORT(p[ 3],p[ 6]); PIX_SORT(p[ 0],p[ 6]); PIX_SORT(p[ 0],p[ 3]);
          PIX_SORT(p[ 4],p[ 7]); PIX_SORT(p[ 1],p[ 7]); PIX_SORT(p[ 1],p[ 4]);
          PIX_SORT(p[11],p[14]); PIX_SORT(p[ 8],p[14]); PIX_SORT(p[ 8],p[11]);
          PIX_SORT(p[12],p[15]); PIX_SORT(p[ 9],p[15]); PIX_SORT(p[ 9],p[12]);
          PIX_SORT(p[13],p[16]); PIX_SORT(p[10],p[16]); PIX_SORT(p[10],p[13]);
          PIX_SORT(p[20],p[23]); PIX_SORT(p[17],p[23]); PIX_SORT(p[17],p[20]);
          PIX_SORT(p[21],p[24]); PIX_SORT(p[18],p[24]); PIX_SORT(p[18],p[21]);
          PIX_SORT(p[19],p[22]); PIX_SORT(p[ 8],p[17]); PIX_SORT(p[ 9],p[18]);
          PIX_SORT(p[ 0],p[18]); PIX_SORT(p[ 0],p[ 9]); PIX_SORT(p[10],p[19]);
          PIX_SORT(p[ 1],p[19]); PIX_SORT(p[ 1],p[10]); PIX_SORT(p[11],p[20]);
          PIX_SORT(p[ 2],p[20]); PIX_SORT(p[ 2],p[11]); PIX_SORT(p[12],p[21]);
          PIX_SORT(p[ 3],p[21]); PIX_SORT(p[ 3],p[12]); PIX_SORT(p[13],p[22]);
          PIX_SORT(p[ 4],p[22]); PIX_SORT(p[ 4],p[13]); PIX_SORT(p[14],p[23]);
          PIX_SORT(p[ 5],p[23]); PIX_SORT(p[ 5],p[14]); PIX_SORT(p[15],p[24]);
          PIX_SORT(p[ 6],p[24]); PIX_SORT(p[ 6],p[15]); PIX_SORT(p[ 7],p[16]);
          PIX_SORT(p[ 7],p[19]); PIX_SORT(p[13],p[21]); PIX_SORT(p[15],p[23]);
          PIX_SORT(p[ 7],p[13]); PIX_SORT(p[ 7],p[15]); PIX_SORT(p[ 1],p[ 9]);
          PIX_SORT(p[ 3],p[11]); PIX_SORT(p[ 5],p[17]); PIX_SORT(p[11],p[17]);
          PIX_SORT(p[ 9],p[17]); PIX_SORT(p[ 4],p[10]); PIX_SORT(p[ 6],p[12]);
          PIX_SORT(p[ 7],p[14]); PIX_SORT(p[ 4],p[ 6]); PIX_SORT(p[ 4],p[ 7]);
          PIX_SORT(p[12],p[14]); PIX_SORT(p[10],p[14]); PIX_SORT(p[ 6],p[ 7]);
          PIX_SORT(p[10],p[12]); PIX_SORT(p[ 6],p[10]); PIX_SORT(p[ 6],p[17]);
          PIX_SORT(p[12],p[17]); PIX_SORT(p[ 7],p[17]); PIX_SORT(p[ 7],p[10]);
          PIX_SORT(p[12],p[18]); PIX_SORT(p[ 7],p[12]); PIX_SORT(p[10],p[18]);
          PIX_SORT(p[12],p[20]); PIX_SORT(p[10],p[20]); PIX_SORT(p[10],p[12]);
          pc[0][1] = p[12];
        }

      /* Apply 3x3 Laplacian filter: |response| > T marks an edge pixel;
         edge pixels get the -9999999 sentinel, smooth pixels take the median */
      edge_cnt = smooth_cnt = 0;
      for (row=1; row < height-1; row++)
        for (col=1; col < width-1; col++) {
          pc = mf + row*width+col;
          EA = 0.8182*(pc[-w1][1]+pc[-1][1]+pc[1][1]+pc[w1][1])-3.6364*pc[0][1]+
               0.0909*(pc[-w1-1][1]+pc[-w1+1][1]+pc[w1-1][1]+pc[w1+1][1]);
          if (EA > T || EA < -T) {
            pc[0][c] = -9999999;
            edge_cnt++; }
          else {
            pc[0][c] = pc[0][1];
            smooth_cnt++; }
        }
#ifdef DCRAW_VERBOSE
      if (verbose)
        fprintf (stderr,_(" edge = %5.2f (%%)\n"),
                 100.*(double)(edge_cnt)/(double)(edge_cnt+smooth_cnt));
#endif
    }

    /* Make sure we don't mess up with edges: if either plane flags a pixel
       as edge, treat it as edge in both */
    for (row=1; row < height-1; row++)
      for (col=1; col < width-1; col++) {
        pc = mf + row*width+col;
        if (pc[0][0] == -9999999 || pc[0][2] == -9999999)
          pc[0][0] = pc[0][2] = -9999999;
      }

    /* Now median(R-G) and median(B-G) are computed */
    /* red/blue at GREEN pixel locations */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
        indx = row*width+col;
        for (i=0; i < 2; c=2-c, i++)
          if (mf[indx][c] != -9999999) {
            v0 = image[indx][1]+mf[indx][c];
            image[indx][c] = CLIP(v0); }
      }

    /* red/blue at BLUE/RED pixel locations */
    for (row=2; row < height-2; row++)
      for (col=2+(FC(row,2) & 1), c=2-FC(row,col); col < width-2; col+=2) {
        indx = row*width+col;
        if (mf[indx][c] != -9999999) {
          v0 = image[indx][1]+mf[indx][c];
          image[indx][c] = CLIP(v0); }
      }

    /* green at RED/BLUE location
       NOTE(review): this loop bounds col by width-3 while its siblings use
       width-1 — looks intentional (col+=2 stride) but worth confirming */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,1) & 1), c=FC(row,col); col < width-3; col+=2) {
        indx = row*width+col;
        d = 2 - c;
        if (mf[indx][c] != -9999999) {
          if (mf[indx][d] != -9999999)
            v0 = (image[indx][c]-mf[indx][c]+image[indx][d]-mf[indx][d]+1) >> 1;
          else
            v0 = (image[indx][c]-mf[indx][c]+image[indx][1]+1) >> 1; }
        else {
          if (mf[indx][d] != -9999999)
            v0 = (image[indx][d]-mf[indx][d]+image[indx][1]+1) >> 1;
          else
            v0 = image[indx][1]; }
        image[indx][1] = CLIP(v0);
      }

    /* Update interpolated pixels after differential median filter */
#ifdef DCRAW_VERBOSE
    if (verbose) fprintf (stderr,_("\tUpdate R,G,B..."));
#endif
    /* Update red & blue at GREEN by averaging color differential values */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,2) & 1), c=FC(row,col+1); col < width-1; col+=2) {
        indx = row*width+col;
        pix = image + indx;
        if (mf[indx][c] != -9999999) {
          v0 = (pix[-1][c]+pix[1][c]+2*pix[0][1]-pix[-1][1]-pix[1][1]+1) >> 1;
          pix[0][c] = CLIP(v0); }
        c = 2 - c;
        if (mf[indx][c] != -9999999) {
          v0 = (pix[-w1][c]+pix[w1][c]+2*pix[0][1]-pix[-w1][1]-pix[w1][1]+1) >> 1;
          pix[0][c] = CLIP(v0); }
        c = 2 - c;
      }

    /* Update red/blue at BLUE/RED pixels by pattern recognition */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,1) & 1), c=2-FC(row,col); col < width-1; col+=2) {
        indx = row*width+col;
        if (mf[indx][c] != -9999999) {
          pix = image + indx;
          /* diagonal G-C differentials around the pixel */
          dC1 = pix[-w1-1][1]-pix[-w1-1][c];
          dC2 = pix[-w1+1][1]-pix[-w1+1][c];
          dC3 = pix[ w1-1][1]-pix[ w1-1][c];
          dC4 = pix[ w1+1][1]-pix[ w1+1][c];
          dC0 = dC1 + dC2 + dC3 + dC4;
          dC1 <<= 2;
          dC2 <<= 2;
          dC3 <<= 2;
          dC4 <<= 2;
          j = (dC1 > dC0) + (dC2 > dC0) + (dC3 > dC0) + (dC4 > dC0);
          if (j == 3 || j == 1) {
            /* edge-corner pattern: median of color differential values */
            PIX_SORT(dC1,dC2);
            PIX_SORT(dC3,dC4);
            PIX_SORT(dC1,dC3);
            PIX_SORT(dC2,dC4);
            dC0 = dC2 + dC3; }
          else {
            /* stripe pattern: average along diagonal */
            v1 = ABS(pix[-w1-1][c]-pix[w1+1][c]);
            v2 = ABS(pix[-w1+1][c]-pix[w1-1][c]);
            if (v1 < v2)
              dC0 = dC1 + dC4;
            else
              dC0 = dC2 + dC3; }
          v0 = (((int)(pix[0][1]) << 3) - dC0 + 4) >> 3;
          pix[0][c] = CLIP(v0); }
      }

    /* Update green at RED/BLUE pixels by pattern recognition */
    for (row=1; row < height-1; row++)
      for (col=1+(FC(row,1) & 1), c=FC(row,col); col < width-1; col+=2) {
        indx = row*width+col;
        if (mf[indx][c] != -9999999) {
          pix = image + indx;
          /* axial C-G differentials around the pixel */
          dC1 = pix[-w1][c]-pix[-w1][1];
          dC2 = pix[ -1][c]-pix[ -1][1];
          dC3 = pix[  1][c]-pix[  1][1];
          dC4 = pix[ w1][c]-pix[ w1][1];
          dC0 = dC1 + dC2 + dC3 + dC4;
          dC1 <<= 2;
          dC2 <<= 2;
          dC3 <<= 2;
          dC4 <<= 2;
          j = (dC1 > dC0) + (dC2 > dC0) + (dC3 > dC0) + (dC4 > dC0);
          if (j == 3 || j == 1) {
            /* edge-corner pattern: median of color differential values */
            PIX_SORT(dC1,dC2);
            PIX_SORT(dC3,dC4);
            PIX_SORT(dC1,dC3);
            PIX_SORT(dC2,dC4);
            dC0 = dC2 + dC3; }
          else {
            /* stripe pattern: average along diagonal */
            v1 = ABS(pix[-w1][1]-pix[w1][1]);
            v2 = ABS(pix[ -1][1]-pix[  1][1]);
            if (v1 < v2)
              dC0 = dC1 + dC4;
            else
              dC0 = dC2 + dC3; }
          v0 = (((int)(pix[0][c]) << 3) - dC0 + 4) >> 3;
          pix[0][1] = CLIP(v0);
        }
      }
#ifdef DCRAW_VERBOSE
    if (verbose) fprintf (stderr,_("\n"));
#endif
  }
  /* Free buffer */
  free(mf);
}
#undef PIX_SORT
|
utils.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/
#ifndef _UTILS_H_
#define _UTILS_H_
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_num_threads() (1)
#define omp_get_thread_num() (0)
#define omp_get_max_threads() (1)
#endif
const int alignment = 64;
typedef long ITyp;
typedef float FTyp;
extern thread_local struct drand48_data rand_buf;
// Milliseconds of wall-clock time elapsed since the first call in this
// process (the first call itself returns ~0).
// NOTE(review): the lazily-initialized statics are not guarded, so the very
// first call should happen before threads can race here — confirm callers.
static double get_time() {
  static bool init_done = false;
  static struct timespec stp = {0,0};
  struct timespec tp;
  clock_gettime(CLOCK_REALTIME, &tp);
  /*clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);*/
  if(!init_done) {
    init_done = true;
    stp = tp;  // remember the epoch on first use
  }
  double ret = (tp.tv_sec - stp.tv_sec) * 1e3 + (tp.tv_nsec - stp.tv_nsec)*1e-6;
  return ret;
}
void set_random_seed(int seed);
// Zero-fill the first sz elements of buf, splitting the work across threads.
template<typename T>
void init_zero(size_t sz, T *buf)
{
#pragma omp parallel for
  for (size_t idx = 0; idx < sz; ++idx) {
    buf[idx] = T(0);
  }
}
// Fill buf with sz uniform random values drawn from [low, high).
// Fix: the scaled value must be offset by +low; the previous "- low" mapped
// the default [-0.1, 0.1) request onto [0.1, 0.3) instead.
template<typename T>
void init_random(size_t sz, T *buf, T low = -0.1, T high = 0.1)
{
  T range = high - low;
#pragma omp parallel for schedule(static)
  for(size_t i = 0; i < sz; i++) {
    double randval;
    drand48_r(&rand_buf, &randval);   // rand_buf is thread_local, so safe here
    buf[i] = randval * range + low;   // drand48 yields [0,1) -> [low, high)
  }
}
// Aligned allocation via _mm_malloc; pair every call with my_free()
// (memory from _mm_malloc must NOT be released with plain free()).
inline void *my_malloc(size_t sz, size_t align)
{
  return _mm_malloc(sz, align);
}
// Release memory previously obtained from my_malloc().
inline void my_free(void *p)
{
  _mm_free(p);
}
#endif /*_UTILS_H_*/
|
GB_unop__identity_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_fp64
// op(A') function: GB_unop_tran__identity_int32_fp64
// C type: int32_t
// A type: double
// cast: int32_t cij = GB_cast_to_int32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int32_fp64
(
    int32_t *Cx,        // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise: Cx [p] = (int32_t) Ax [p], via the generated cast macro
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose loop body lives in the shared template; the GB_* macros
    // defined above specialize it to this int32<-fp64 identity operator.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
relu1_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: bzhang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
/* ReLU1 reference kernel: clamp every element of the input to [-1, 1].
 *
 * Assumes NCHW layout (dims = {n, c, h, w}) — TODO confirm against the
 * framework's tensor convention; the original indexing (dims[1]=channels,
 * dims[2]=h, dims[3]=w) implies it.
 *
 * Fixes vs. the original:
 *  - the batch dimension (dims[0]) was ignored, so only the first batch
 *    was processed when n > 1; we now cover n * c planes.
 *  - `h` was read from the output tensor while the other dims came from
 *    the input; all dims now come from the input for consistency
 *    (reshape() keeps the two shapes identical anyway).
 *
 * Returns 0 on success. */
int ref_relu1_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int batch = input_tensor->dims[0];
    int channels = input_tensor->dims[1];
    int h = input_tensor->dims[2];
    int w = input_tensor->dims[3];
    int c_step = h * w;
    int total_planes = batch * channels;

    float* input_data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < total_planes; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;
        for (int i = 0; i < c_step; i++)
        {
            float v = src[i];
            if (v > 1.f)
                v = 1.f;
            if (v < -1.f)
                v = -1.f;
            dst[i] = v;
        }
    }

    return 0;
}
/* Per-node initialization hook: ReLU1 needs no extra state. Always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Per-node teardown hook: nothing was allocated in init_node. Always succeeds. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Execute the ReLU1 node: fetch its single input/output tensors from the
 * graph and run the fp32 reference kernel.
 *
 * Fix: the kernel's return status was discarded and 0 returned
 * unconditionally; propagate it so future error paths are not masked. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    return ref_relu1_fp32(input_tensor, output_tensor, exec_graph->num_thread);
}
/* Shape inference: ReLU1 is elementwise, so the output shape is a straight
 * copy of the input shape. Returns set_ir_tensor_shape's status. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* graph = ir_node->graph;
    struct tensor* in = get_ir_graph_tensor(graph, ir_node->input_tensors[0]);
    struct tensor* out = get_ir_graph_tensor(graph, ir_node->output_tensors[0]);

    return set_ir_tensor_shape(out, in->dims, in->dim_num);
}
/* Backend selection score: the reference implementation can always run,
 * but only as a fallback (OPS_SCORE_CANDO). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}
/* Operator vtable for the ReLU1 reference backend; unneeded hooks are NULL. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register this reference implementation for OP_RELU1 with the runtime.
 * Returns the registry's status code. */
int register_relu1_ref_op()
{
    return register_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}
/* Remove this reference implementation from the OP_RELU1 registry. */
int unregister_relu1_ref_op()
{
    return unregister_builtin_node_ops(OP_RELU1, &hcl_node_ops);
}
|
task-4.c | /* { dg-do run } */
#include <omp.h>
#include <stdlib.h>
#include <string.h>
int e;
/* Check that the firstprivate VLA copies still hold the sentinel values
 * written by foo(); count any mismatch in the global error flag `e`.
 * noinline keeps the check out of the task body for the test's purposes. */
void __attribute__((noinline))
baz (int i, int *p, int j, int *q)
{
  if (p[0] != 1 || p[i] != 3 || q[0] != 2 || q[j] != 4)
    #pragma omp atomic
    e++;
}
/* Exercise firstprivate on variable-length arrays: fill two VLAs with
 * sentinels, then spawn a task that receives private copies of them.
 * The copies must preserve p[0]=1, p[i]=3, q[0]=2, q[j]=4. */
void __attribute__((noinline))
foo (int i, int j)
{
  int p[i + 1];
  int q[j + 1];
  memset (p, 0, sizeof (p));
  memset (q, 0, sizeof (q));
  p[0] = 1;
  p[i] = 3;
  q[0] = 2;
  q[j] = 4;
  #pragma omp task firstprivate (p, q)
  baz (i, p, j, q);
}
int
main (void)
{
  /* Run foo with thread-dependent VLA sizes on 4 threads; any mismatch
     detected by baz sets `e`, which aborts the test. */
  #pragma omp parallel num_threads (4)
  foo (5 + omp_get_thread_num (), 7 + omp_get_thread_num ());
  if (e)
    abort ();
  return 0;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size and even
% shape, to an image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Prehaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
** The following test is for special floating point numbers of value NaN (not
** a number), that may be used within a Kernel Definition. NaN's are defined
** as part of the IEEE standard for floating point number representation.
**
** These are used as a Kernel value to mean that this kernel position is not
** part of the kernel neighbourhood for convolution or morphology processing,
** and thus should be ignored. This allows the use of 'shaped' kernels.
**
** The special properity that two NaN's are never equal, even if they are from
** the same variable allow you to test if a value is special NaN value.
**
** This macro IsNaN() is thus is only true if the value given is NaN.
*/
#define IsNan(a) ((a)!=(a))
/*
Other global definitions used by module.
*/
/* Return the smaller of two doubles (returns y when x is NaN, matching the
   original ternary's comparison direction). */
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}
/* Return the larger of two doubles (returns y when x is NaN, matching the
   original ternary's comparison direction). */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walk a kernel list and return its final entry (assumes kernel != NULL). */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel defintion strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernal method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/* Parse a user-supplied kernel array string ("WxH[+X+Y][@><]:v,v,..." or an
 * old-style flat list of numbers forming an odd square) into a KernelInfo.
 * Returns NULL on any parse/allocation failure (partially built kernels are
 * destroyed), or an empty kernel when kernel_string is NULL. */
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  /* Allocate and zero an empty user-defined kernel structure. */
  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *)NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks: origin defaults to the center pixel and
         must fall inside the kernel bounds. */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetMagickToken(p,&p,token);
        if (*token == ',')
          GetMagickToken(p,&p,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      /* rewind to re-read the same values as kernel entries below */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum = +MagickHuge;
  kernel->maximum = -MagickHuge;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetMagickToken(p,&p,token);
    if (*token == ',')
      GetMagickToken(p,&p,token);
    /* "nan" or "-" marks a position excluded from the kernel neighbourhood */
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* do not include this value in kernel */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
        ? ( kernel->negative_range += kernel->values[i] )
        : ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  GetMagickToken(p,&p,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if ( kernel->minimum == MagickHuge )
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )          /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0);  /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 )  /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0);  /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )     /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);        /* 90 degree mirror rotate */

  return(kernel);
}
/* Parse a 'named' built-in kernel specification ("name:args[@><]"):
 * look up the name, parse its geometry arguments, fill in per-type
 * defaults, and delegate to AcquireKernelBuiltIn(). Returns NULL when the
 * name is not a valid built-in kernel, or on acquisition failure. */
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetMagickToken(kernel_string,&p,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *)NULL);  /* not a valid named kernel */

  /* skip separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 )  /* if no width then */
        args.rho = args.sigma;          /* then width = height */
      if ( args.rho < 1.0 )             /* if width too small */
        args.rho = 3;                   /* then width = 3 */
      if ( args.sigma < 1.0 )           /* if height too small */
        args.sigma = args.rho;          /* then height = width */
      if ( (flags & XValue) == 0 )      /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )            /* no distance scale */
        args.sigma = 100.0;                        /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )      /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1);  /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )     /* '%' flag */
        args.sigma *= QuantumRange/100.0;          /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )          /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 )  /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )     /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}
/* Convert a user-supplied kernel string (possibly a ';'-separated list of
 * kernel definitions) into a linked list of KernelInfo structures. Named
 * kernels go through ParseKernelName(), everything else through
 * ParseKernelArray(). A NULL input yields a single empty kernel. On any
 * parse failure the partial list is destroyed and NULL returned. */
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    token[MaxTextExtent];

  const char
    *p;

  size_t
    kernel_number;

  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p = kernel_string;
  kernel = NULL;
  kernel_number = 0;

  while ( GetMagickToken(p,NULL,token), *token != '\0' ) {

    /* ignore extra or multiple ';' kernel separators */
    if ( *token != ';' ) {

      /* tokens starting with alpha is a Named kernel */
      if (isalpha((int) *token) != 0)
        new_kernel = ParseKernelName(p);
      else /* otherwise a user defined kernel array */
        new_kernel = ParseKernelArray(p);

      /* Error handling -- this is not proper error handling! */
      if ( new_kernel == (KernelInfo *) NULL ) {
        (void) FormatLocaleFile(stderr, "Failed to parse kernel number #%.20g\n",
          (double) kernel_number);
        if ( kernel != (KernelInfo *) NULL )
          kernel=DestroyKernelInfo(kernel);
        return((KernelInfo *) NULL);
      }

      /* initialise or append the kernel list */
      if ( kernel == (KernelInfo *) NULL )
        kernel = new_kernel;
      else
        LastKernelInfo(kernel)->next = new_kernel;
    }

    /* look for the next kernel in list */
    p = strchr(p, ';');
    if ( p == (char *) NULL )
      break;
    p++;
  }
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usally much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels, but are used
% more for highlighting and marking any single pixels in an image using
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *)NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) AcquireAlignedMemory(1,
sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(double));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3] = +MagickSQ2;
kernel->values[5] = kernel->values[7] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) > MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* Detach the clone from the source list BEFORE any error exit: the
  ** structure copy above duplicated the 'next' and 'values' pointers, so
  ** an early DestroyKernelInfo(new_kernel) would otherwise free the
  ** ORIGINAL kernel's value array and the original list's tail.
  */
  new_kernel->next=(KernelInfo *) NULL;
  new_kernel->values=(double *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /* Free a Convolution/Morphology kernel list: walk the 'next' chain
  ** iteratively, releasing each node's value array and then the node
  ** itself.  Always returns NULL so callers can clear their pointer.
  */
  KernelInfo
    *node;

  assert(kernel != (KernelInfo *) NULL);
  node=kernel;
  while (node != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=node->next;
    node->values=(double *) RelinquishAlignedMemory(node->values);
    node=(KernelInfo *) RelinquishMagickMemory(node);
    node=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* FlopKernelInfo() -- mirror a kernel horizontally in place by reversing
** each row of values, then reflecting the origin's x offset.
** NOTE(review): this block is disabled; its last statement references an
** 'angle' variable that is not declared anywhere in this function, so
** enabling it as-is will not compile -- presumably it was pasted from a
** rotation routine and never finished.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
/* swap values symmetrically about each row's midpoint */
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
/* reflect the origin offset across the kernel width */
kernel->x = kernel->width - kernel->x - 1;
angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand a single kernel into a sequence of 4 kernels: the original,
  ** its 180-degree rotation (flip), the 90-degree rotation (transpose)
  ** of that, and its 180-degree rotation (flop).  This order gives a
  ** more symmetrical thinning than a plain rotation cycle.
  **
  ** Fix: CloneKernelInfo() may return NULL on allocation failure; the
  ** previous code passed that NULL straight into RotateKernelInfo(),
  ** dereferencing it.  On failure we now stop expanding, leaving the
  ** (possibly partial) list in a consistent, destroyable state.
  */
  KernelInfo
    *clone,
    *last;

  last = kernel;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels,
% while 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  /* Return MagickTrue when the two kernels are equivalent: identical
  ** geometry and origin, and element-wise equal values where a NaN
  ** ('don't care' element) only matches another NaN and finite values
  ** must agree to within MagickEpsilon.
  */
  register size_t
    i;

  /* geometry and origin location must match exactly */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* compare the actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++)
  {
    const double
      v1 = kernel1->values[i],
      v2 = kernel2->values[i];

    /* a NaN in one kernel must be matched by a NaN in the other */
    if ( IsNan(v1) && !IsNan(v2) )
      return MagickFalse;
    if ( IsNan(v2) && !IsNan(v1) )
      return MagickFalse;
    /* finite values must be equivalent (NaN-NaN compares false here) */
    if ( fabs(v1-v2) > MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  /* Expand a kernel list by appending copies rotated incrementally by
  ** 'angle' degrees until the rotation cycles back to the original.
  **
  ** Fix: CloneKernelInfo() may return NULL on allocation failure; the
  ** previous code passed that NULL into RotateKernelInfo() and
  ** SameKernelInfo(), dereferencing it.  We now break out of the loop
  ** on failure and only destroy the final clone when one exists.
  */
  KernelInfo
    *clone,
    *last;

  clone = (KernelInfo *) NULL;
  last = kernel;
  while(1) {
    clone = CloneKernelInfo(last);
    if ( clone == (KernelInfo *) NULL )
      break;                            /* allocation failed - stop expanding */
    RotateKernelInfo(clone, angle);
    if ( SameKernelInfo(kernel, clone) == MagickTrue )
      break;                            /* kernel has repeated - we are done */
    LastKernelInfo(last)->next = clone;
    last = clone;
  }
  if ( clone != (KernelInfo *) NULL )
    clone = DestroyKernelInfo(clone);   /* junk the repeated clone */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a                                         %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /* Recalculate the meta-data (minimum, maximum, and the separate
  ** positive/negative value sums) of this single kernel from its value
  ** array.  Values within MagickEpsilon of zero are snapped to exactly
  ** 0.0 as a side effect.  Minimum/maximum start at 0.0, so zero is
  ** always considered part of the value range.
  */
  register size_t
    i;

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    double
      value;

    value = kernel->values[i];
    if ( fabs(value) < MagickEpsilon )
      kernel->values[i] = value = 0.0;  /* snap tiny values to zero */
    if ( value < 0 )
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels.
%
% It is basically equivalent to as MorphologyImageChannel() (see below) but
% without any user controls. This allows internel programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImageChannel() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically kernels are not normalized/scaled/blended by the
% 'convolve:scale' Image Artifact (setting), nor is the convolve bias
% (-bias setting or image->bias) loooked at, but must be supplied from the
% function arguments.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
status=MagickTrue;
changed=0;
progress=0;
p_view=AcquireCacheView(image);
q_view=AcquireCacheView(result_image);
virt_width=image->columns+kernel->width-1;
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
** Using a vertical 1-d Blue with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, x, -offy,1,
image->rows+kernel->height-1, exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
v;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,GetPixelIndex(
p_indexes+r));
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNan(*k) ) continue;
result.red += (*k)*GetPixelRed(k_pixels);
result.green += (*k)*GetPixelGreen(k_pixels);
result.blue += (*k)*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels)));
gamma += alpha;
result.red += alpha*GetPixelRed(k_pixels);
result.green += alpha*GetPixelGreen(k_pixels);
result.blue += alpha*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += alpha*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(gamma*
result.index));
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q))
|| ( p[r].green != GetPixelGreen(q))
|| ( p[r].blue != GetPixelBlue(q))
|| ( p[r].opacity != GetPixelOpacity(q))
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
return(status ? (ssize_t) changed : 0);
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result,
min,
max;
/* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+r));
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (MagickRealType) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = (MagickRealType) 0;
/* default result is the original pixel value */
result.red = (MagickRealType) p[r].red;
result.green = (MagickRealType) p[r].green;
result.blue = (MagickRealType) p[r].blue;
result.opacity = QuantumRange - (MagickRealType) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (MagickRealType) GetPixelIndex(p_indexes+r);
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** Correlation is actually the same as this but without reflecting
** the kernel, and thus 'lower-level' than Convolution. However
** as Convolution is the more common method used, and it does not
** really cost us much in terms of processing to use a reflected
** kernel, so it is Convolution that is implemented.
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(
result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-
k_pixels[u].opacity));
gamma += alpha;
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index+=alpha*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(gamma*
result.index));
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* Minimum of Foreground Pixel minus Maximum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods, 0.0 for background, and 1.0 for foreground
** with either Nan or 0.5 values for don't care.
**
** Note that this will never produce a meaningless negative
** result. Such results can cause Thinning/Thicken to not work
** correctly when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) ) continue;
if ( (*k) > 0.7 )
{ /* minimim of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) < PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case IterativeDistanceMorphology:
/* Work out an iterative distance from black edge of a white image
** shape. Essentially white values are decreased to the smallest
** 'distance from edge' it can find.
**
** It works by adding kernel values to the neighbourhood, and
** select the minimum value found. The kernel is rotated before
** use, so kernel distances match resulting distances, when a user
** provided asymmetric kernel is applied.
**
**
** This code is almost identical to True GrayScale Morphology But
** not quite.
**
** GreyDilate Kernel values added, maximum value found Kernel is
** rotated before use.
**
** GrayErode: Kernel values subtracted and minimum value found No
** kernel rotation used.
**
** Note that the Iterative Distance method is essentially a
** GrayErode, but with negative kernel values, and kernel
** rotation applied.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
** NOTE: Difference Morphology operators Edge* and *Hat could also
** be done here but works better with iteration as a image difference
** in the controlling function (below). Thicken and Thinning however
** should be done here so thay can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
/* Add the pattern matchs to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( p[r].opacity != GetPixelOpacity(q) )
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is, after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* Apply a 'direct' morphology primitive (Distance or Voronoi) to the
  ** image in-place, using two passes (top-down, then bottom-up), each of
  ** which re-uses the results of pixels already processed in that pass.
  **
  ** Returns the number of pixels changed, or -1 on error.
  ** Because each row depends on the row processed before it, this
  ** function must NOT be multi-threaded.
  */
  CacheView
    *auth_view,   /* authentic (writable) view of 'image' */
    *virt_view;   /* virtual (read-only, edge-handled) view of same image */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;

  size_t
    virt_width,
    changed;

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  /* Some methods (including convolve) need to use a reflected kernel.
  ** Adjust 'origin' offsets so the loops walk the kernel as a reflection.
  */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* Abort (in debug builds) on an unsupported method.  The previous
      ** form, asserting that a string literal was non-NULL, was always
      ** true and so could never fire.
      */
      assert(!"Not a PrimitiveDirect Morphology Method");
      break;
  }

  /* DO NOT THREAD THIS CODE! */
  /* two views into same image (virtual, and actual) */
  virt_view=AcquireCacheView(image);
  auth_view=AcquireCacheView(image);
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down through the image. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the top half of the kernel is processed as we do a single pass
    ** downward through the image, iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
         exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
         exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const MagickRealType
        *restrict k;

      register const PixelPacket
        *restrict k_pixels;

      register const IndexPacket
        *restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNan(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNan(*k) ) continue;
            Minimize(result.red,     (*k)+k_pixels[u].red);
            Minimize(result.green,   (*k)+k_pixels[u].green);
            Minimize(result.blue,    (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel so that alpha can
          ** also be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNan(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNan(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if ((channel & IndexChannel) != 0
              && image->colorspace == CMYKColorspace)
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( p[r].opacity != GetPixelOpacity(q) )
          || ( image->colorspace == CMYKColorspace &&
               GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changed++;  /* The pixel was changed in some way! */

      p++;  /* increment pixel buffers */
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
                == MagickFalse )
        status=MagickFalse;
  } /* y */

  /* Pass 2: the reversed (bottom-up) pass through the image. */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel is processed as we make a single
    ** pass upward through the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
         exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
         exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const MagickRealType
        *restrict k;

      register const PixelPacket
        *restrict k_pixels;

      register const IndexPacket
        *restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNan(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNan(*k) ) continue;
            Minimize(result.red,     (*k)+k_pixels[u].red);
            Minimize(result.green,   (*k)+k_pixels[u].green);
            Minimize(result.blue,    (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNan(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNan(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if ((channel & IndexChannel) != 0
              && image->colorspace == CMYKColorspace)
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( p[r].opacity != GetPixelOpacity(q) )
          || ( image->colorspace == CMYKColorspace &&
               GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changed++;  /* The pixel was changed in some way! */

      p--;  /* go backward through pixel buffers */
      q--;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
                == MagickFalse )
        status=MagickFalse;
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
  channel,const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,
  const double bias, ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number of pixels changed by last primitive operation */

  char
    v_info[80];     /* verbose mode: prefix describing method/stage */

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *)NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special == MagickTrue )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&rslt_image->exception);
          goto error_cleanup;
        }
      changed = MorphologyPrimitiveDirect(rslt_image, method,
        channel, kernel, exception);

      /* FIX: removed a redundant duplicated "(void) (void)" cast here */
      if ( verbose == MagickTrue )
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
        (void) CompositeImageChannel(rslt_image, DefaultChannels,
          CopyOpacityCompositeOp, image, 0, 0);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;
    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image; /* save the image difference */
              curr_image = (Image *) image; /* erode the original image */
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if ( verbose == MagickTrue ) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;   /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
                {
                  InheritException(exception,&work_image->exception);
                  goto error_cleanup;
                }
              /* work_image->type=image->type; ??? */
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            channel, this_kernel, bias, exception);

          /* FIX: removed a redundant duplicated "(void) (void)" cast here */
          if ( verbose == MagickTrue ) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if ( verbose == MagickTrue && stage_loop < stage_limit )
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if ( verbose == MagickTrue )
            (void) FormatLocaleFile(stderr, "\n%s: Difference with original image",
              CommandOptionToMnemonic(MagickMorphologyOptions, method) );
          (void) CompositeImageChannel(curr_image,
            (ChannelType) (channel & ~SyncChannels),
            DifferenceCompositeOp, image, 0, 0);
          break;
        case EdgeMorphology:
          if ( verbose == MagickTrue )
            (void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",
              CommandOptionToMnemonic(MagickMorphologyOptions, method) );
          (void) CompositeImageChannel(curr_image,
            (ChannelType) (channel & ~SyncChannels),
            DifferenceCompositeOp, save_image, 0, 0);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if ( verbose == MagickTrue ) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if ( verbose == MagickTrue )
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if ( verbose == MagickTrue )
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImageChannel(rslt_image,
            (ChannelType) (channel & ~SyncChannels), rslt_compose,
            curr_image, 0, 0);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if ( verbose == MagickTrue )
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but they make this cleanup a lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;  /* avoid a double free below */
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;  /* never destroy result or caller's image */
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  KernelInfo
    *curr_kernel;   /* kernel actually used (may be a scaled clone) */

  CompositeOperator
    compose;        /* user override of the multi-kernel compose method */

  Image
    *morphology_image;

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  if ( method == ConvolveMorphology || method == CorrelateMorphology )
    {
      const char
        *artifact;

      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *)NULL ) {
        /* Clone the caller's kernel before modifying it in place. */
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL); /* FIX: clone failed - there is nothing to
                                     destroy; the previous code called
                                     DestroyKernelInfo(NULL), which asserts */
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }

  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;

    artifact = GetImageArtifact(image,"morphology:compose");
    compose = UndefinedCompositeOp;  /* use default for method */
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
        MagickComposeOptions,MagickFalse,artifact);
  }

  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
    curr_kernel, compose, image->bias, exception);

  /* Cleanup and Exit - free the clone only, never the caller's kernel */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod
  method, const ssize_t iterations,const KernelInfo *kernel, ExceptionInfo
  *exception)
{
  /* Convenience wrapper: apply the morphology to all default channels. */
  return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
    kernel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* Rotate every kernel in the multi-kernel list; recurse into the tail
  ** first so the whole list ends up rotated by the same angle. */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
          ** cycle the 8 outer values one step around the centre
          ** (index 4, the centre, is untouched). */
          MagickRealType t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate a non-centered origin: work in coordinates relative
          ** to the centre cell (1,1), stepping the origin around the
          ** eight outer cells to match the value rotation above. */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  /* Rotations of 90 degrees (transpose based) */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* direction of rotation depends on the resulting orientation */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees:
          ** four-way cyclic exchange of mirrored positions, done in place. */
          { register size_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=kernel->width-1; i<=x; i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      size_t
        i,
        j;

      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;

      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least be between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    args;

  GeometryFlags
    flags;

  /* Parse the user supplied geometry string into numeric arguments. */
  SetGeometryInfo(&args);
  flags = (GeometryFlags) ParseGeometry(geometry, &args);

  /* A percent flag applies to both of the numeric arguments. */
  if ( (flags & PercentValue) != 0 ) {
    args.rho *= 0.01;
    args.sigma *= 0.01;
  }

  /* Supply defaults for any argument the user left out. */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* First argument (plus normalize flags): scale/normalize the kernel. */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* Second argument (if given): blend in a scaled unity kernel. */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register ssize_t
    i;

  register double
    pos_scale,    /* divisor applied to positive kernel values */
    neg_scale;    /* divisor applied to negative kernel values */

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) > MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) > MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) > MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNan(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* FIX: was '= 1', which discarded the saved
                             maximum and broke the max/min swap */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'showkernel' option request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *node;

  size_t
    index,
    pos,
    col,
    row;

  /*
    Walk the (possibly multi-kernel) list, dumping each kernel's summary
    and value table to standard error.
  */
  index=0;
  for (node=kernel; node != (KernelInfo *) NULL; node=node->next) {
    (void) FormatLocaleFile(stderr, "Kernel");
    /* Only label kernels with an index when the list has more than one. */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) index );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, node->type) );
    if ( fabs(node->angle) > MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", node->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
          node->width,(unsigned long) node->height,(long) node->x,(long) node->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), node->minimum,
          GetMagickPrecision(), node->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), node->negative_range,
          GetMagickPrecision(), node->positive_range);
    /* Classify the kernel by the sum of its positive and negative ranges. */
    if ( fabs(node->positive_range+node->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(node->positive_range+node->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), node->positive_range+node->negative_range);
    /* Print the value table, one row per line; NaN marks 'shape' holes. */
    pos=0;
    for (row=0; row < node->height; row++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (col=0; col < node->width; col++, pos++) {
        if ( IsNan(node->values[pos]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), node->values[pos]);
      }
      (void) FormatLocaleFile(stderr,"\n");
    }
    index++;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y   A d d   K e r n e l   I n f o                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAdditionKernelInfo method is:
%
% void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *node;

  /*
    Iterate over every kernel in the multi-kernel list (the per-kernel
    updates are independent, so iteration is equivalent to the original
    tail recursion), adding the scaled unity kernel -- a single impulse
    at the kernel origin -- and refreshing the kernel's meta-data.
  */
  for (node=kernel; node != (KernelInfo *) NULL; node=node->next)
  {
    node->values[node->x+node->y*node->width] += scale;
    CalcKernelMetaData(node); /* recalculate the meta-data */
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *node;

  size_t
    n,
    count;

  /*
    Iterate over every kernel in the multi-kernel list (per-kernel work is
    independent, so iteration matches the original tail recursion) and
    replace any special 'nan' value with zero.
  */
  for (node=kernel; node != (KernelInfo *) NULL; node=node->next)
  {
    count=node->width*node->height;
    for (n=0; n < count; n++)
      if ( IsNan(node->values[n]) )
        node->values[n] = 0.0;
  }
  return;
}
|
pzgstrs_lsum.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Perform local block modifications: lsum[i] -= L_i,k * X[k]
*
* <pre>
* -- Distributed SuperLU routine (version 6.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* March 15, 2003
*
* Modified:
 * February 7, 2001    use MPI_Isend/MPI_Irecv
* October 2, 2001 use MPI_Isend/MPI_Irecv with MPI_Test
* February 8, 2019 version 6.1.1
* </pre>
*/
#include "superlu_zdefs.h"
#include "superlu_defs.h"
#ifndef CACHELINE
#define CACHELINE 64 /* bytes, Xeon Phi KNL, Cori haswell, Edision */
#endif
#define ISEND_IRECV
/*
* Function prototypes
*/
#ifdef _CRAY
fortran void CTRSM(_fcd, _fcd, _fcd, _fcd, int*, int*, doublecomplex*,
doublecomplex*, int*, doublecomplex*, int*);
fortran void CGEMM(_fcd, _fcd, int*, int*, int*, doublecomplex*, doublecomplex*,
int*, doublecomplex*, int*, doublecomplex*, doublecomplex*, int*);
_fcd ftcs1;
_fcd ftcs2;
_fcd ftcs3;
#endif
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void zlsum_fmod
/************************************************************************/
(
 doublecomplex *lsum,    /* Sum of local modifications. */
 doublecomplex *x,       /* X array (local) */
 doublecomplex *xk,      /* X[k]. */
 doublecomplex *rtemp,   /* Result of full matrix-vector multiply. */
 int   nrhs,             /* Number of right-hand sides. */
 int   knsupc,           /* Size of supernode k. */
 int_t k,                /* The k-th component of X. */
 int_t *fmod,            /* Modification count for L-solve. */
 int_t nlb,              /* Number of L blocks. */
 int_t lptr,             /* Starting position in lsub[*]. */
 int_t luptr,            /* Starting position in lusup[*]. */
 int_t *xsup,
 gridinfo_t *grid,
 LocalLU_t *Llu,
 MPI_Request send_req[], /* input/output */
 SuperLUStat_t *stat
)
{
/*
 * Purpose
 * =======
 *   Perform the local block modifications lsum[i] -= L_{i,k} * X[k] for
 *   all off-diagonal L blocks in block column k owned by this process.
 *   When a destination block's modification count (fmod) drops to zero,
 *   either forward its lsum to the owning diagonal process, or -- on the
 *   diagonal process itself -- fold lsum into X, solve the diagonal
 *   system, broadcast X[k] down the process column, and recurse.
 */
    doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
    doublecomplex *lusup, *lusup1;
    doublecomplex *dest;
    int    iam, iknsupc, myrow, nbrow, nsupr, nsupr1, p, pi;
    int_t  i, ii, ik, il, ikcol, irow, j, lb, lk, lib, rel;
    int_t  *lsub, *lsub1, nlb1, lptr1, luptr1;
    int_t  *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
    int_t  *frecv = Llu->frecv;
    int_t  **fsendx_plist = Llu->fsendx_plist;
    MPI_Status status;
    int test_flag;

#if ( PROFlevel>=1 )
    double t1, t2;
    float msg_vol = 0, msg_cnt = 0;
#endif
#if ( PROFlevel>=1 )
    TIC(t1);
#endif

    iam = grid->iam;
    myrow = MYROW( iam, grid );
    lk = LBj( k, grid ); /* Local block number, column-wise. */
    lsub = Llu->Lrowind_bc_ptr[lk];
    lusup = Llu->Lnzval_bc_ptr[lk];
    nsupr = lsub[1];    /* Leading dimension of the nonzero block column. */

    for (lb = 0; lb < nlb; ++lb) {
	ik = lsub[lptr]; /* Global block number, row-wise. */
	nbrow = lsub[lptr+1];

	/* rtemp := L_{ik,k} * X[k] (dense GEMM on this block). */
#ifdef _CRAY
	CGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
	       &alpha, &lusup[luptr], &nsupr, xk,
	       &knsupc, &beta, rtemp, &nbrow );
#elif defined (USE_VENDOR_BLAS)
	zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
	        &alpha, &lusup[luptr], &nsupr, xk,
	        &knsupc, &beta, rtemp, &nbrow, 1, 1 );
#else
	zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
	        &alpha, &lusup[luptr], &nsupr, xk,
	        &knsupc, &beta, rtemp, &nbrow );
#endif
	stat->ops[SOLVE] += 8 * nbrow * nrhs * knsupc + 2 * nbrow * nrhs;

	lk = LBi( ik, grid ); /* Local block number, row-wise. */
	iknsupc = SuperSize( ik );
	il = LSUM_BLK( lk );
	dest = &lsum[il];
	lptr += LB_DESCRIPTOR;
	rel = xsup[ik]; /* Global row index of block ik. */

	/* Scatter-subtract rtemp into lsum using the row indices of block ik. */
	for (i = 0; i < nbrow; ++i) {
	    irow = lsub[lptr++] - rel; /* Relative row. */
	    RHS_ITERATE(j)
		z_sub(&dest[irow + j*iknsupc],
		      &dest[irow + j*iknsupc],
		      &rtemp[i + j*nbrow]);
	}
	luptr += nbrow;

#if ( PROFlevel>=1 )
	TOC(t2, t1);
	stat->utime[SOL_GEMM] += t2;
#endif

	if ( (--fmod[lk])==0 ) { /* Local accumulation done. */
	    ikcol = PCOL( ik, grid );
	    p = PNUM( myrow, ikcol, grid );
	    if ( iam != p ) {
		/* Not the diagonal process: ship lsum (with its header) there. */
#ifdef ISEND_IRECV
		MPI_Isend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
			   SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm,
			   &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
		MPI_Bsend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
			   SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#else
		MPI_Send( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
			  SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
		printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
		       iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
	    } else { /* Diagonal process: X[i] += lsum[i]. */
		ii = X_BLK( lk );
		RHS_ITERATE(j)
		    for (i = 0; i < iknsupc; ++i)
			z_add(&x[i + ii + j*iknsupc],
			      &x[i + ii + j*iknsupc],
			      &lsum[i + il + j*iknsupc]);
		if ( frecv[lk]==0 ) { /* Becomes a leaf node. */
		    fmod[lk] = -1; /* Do not solve X[k] in the future. */
		    lk = LBj( ik, grid );/* Local block number, column-wise. */
		    lsub1 = Llu->Lrowind_bc_ptr[lk];
		    lusup1 = Llu->Lnzval_bc_ptr[lk];
		    nsupr1 = lsub1[1];
#if ( PROFlevel>=1 )
		    TIC(t1);
#endif
		    /* Solve the unit-lower-triangular diagonal block:
		       X[ik] := L(ik,ik)^{-1} * X[ik]. */
#ifdef _CRAY
		    CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
			  lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
		    ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
			   lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
		    ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
			   lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
#if ( PROFlevel>=1 )
		    TOC(t2, t1);
		    stat->utime[SOL_TRSM] += t2;
#endif
		    /* BUGFIX: the division term previously used knsupc (the
		       size of supernode k), but the TRSM above solved an
		       iknsupc-size system; zlsum_bmod counts the analogous
		       term with its own supernode size. Stats only -- the
		       numerical result is unaffected. */
		    stat->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
			+ 10 * iknsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
		    printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
		    /*
		     * Send Xk to process column Pc[k].
		     */
		    for (p = 0; p < grid->nprow; ++p) {
			if ( fsendx_plist[lk][p] != EMPTY ) {
			    pi = PNUM( p, ikcol, grid );
#ifdef ISEND_IRECV
			    MPI_Isend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
				       SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
				       &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
			    MPI_Bsend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
				       SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#else
			    MPI_Send( &x[ii - XK_H], iknsupc * nrhs + XK_H,
				      SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
			    printf("(%2d) Sent X[%2.0f] to P %2d\n",
				   iam, x[ii-XK_H], pi);
#endif
			}
		    }
		    /*
		     * Perform local block modifications.
		     */
		    nlb1 = lsub1[0] - 1;
		    lptr1 = BC_HEADER + LB_DESCRIPTOR + iknsupc;
		    luptr1 = iknsupc; /* Skip diagonal block L(I,I). */
		    zlsum_fmod(lsum, x, &x[ii], rtemp, nrhs, iknsupc, ik,
			       fmod, nlb1, lptr1, luptr1, xsup,
			       grid, Llu, send_req, stat);
		} /* if frecv[lk] == 0 */
	    } /* if iam == p */
	} /* if fmod[lk] == 0 */
    } /* for lb ... */
} /* zLSUM_FMOD */
/************************************************************************/
void zlsum_bmod
/************************************************************************/
(
 doublecomplex *lsum,       /* Sum of local modifications. */
 doublecomplex *x,          /* X array (local). */
 doublecomplex *xk,         /* X[k]. */
 int   nrhs,                /* Number of right-hand sides. */
 int_t k,                   /* The k-th component of X. */
 int_t *bmod,               /* Modification count for L-solve. */
 int_t *Urbs,               /* Number of row blocks in each block column of U.*/
 Ucb_indptr_t **Ucb_indptr, /* Vertical linked list pointing to Uindex[].*/
 int_t **Ucb_valptr,        /* Vertical linked list pointing to Unzval[]. */
 int_t *xsup,
 gridinfo_t *grid,
 LocalLU_t *Llu,
 MPI_Request send_req[],    /* input/output */
 SuperLUStat_t *stat
)
{
/*
 * Purpose
 * =======
 *   Perform local block modifications: lsum[i] -= U_i,k * X[k].
 *
 *   (Backward-solve counterpart of zlsum_fmod: walks the U blocks in
 *   block column k via the Ucb_indptr/Ucb_valptr vertical lists, and
 *   when a block's modification count bmod reaches zero, either forwards
 *   its lsum to the owning diagonal process or -- on that process --
 *   folds lsum into X, solves the upper-triangular diagonal system,
 *   broadcasts X, and recurses.)
 */
    doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
    int    iam, iknsupc, knsupc, myrow, nsupr, p, pi;
    int_t  fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
	   j, jj, lk, lk1, nub, ub, uptr;
    int_t  *usub;
    doublecomplex *uval, *dest, *y;
    doublecomplex temp;
    int_t  *lsub;
    doublecomplex *lusup;
    int_t  *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
    int_t  *brecv = Llu->brecv;
    int_t  **bsendx_plist = Llu->bsendx_plist;
    MPI_Status status;
    int test_flag;

    iam = grid->iam;
    myrow = MYROW( iam, grid );
    knsupc = SuperSize( k );
    lk = LBj( k, grid ); /* Local block number, column-wise. */
    nub = Urbs[lk];      /* Number of U blocks in block column lk */

    for (ub = 0; ub < nub; ++ub) {
	ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
	usub = Llu->Ufstnz_br_ptr[ik];
	uval = Llu->Unzval_br_ptr[ik];
	i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
	i += UB_DESCRIPTOR;
	il = LSUM_BLK( ik );
	gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
	iknsupc = SuperSize( gik );
	ikfrow = FstBlockC( gik );     /* First global row of block gik. */
	iklrow = FstBlockC( gik+1 );   /* One past the last global row. */

	RHS_ITERATE(j) {
	    dest = &lsum[il + j*iknsupc];
	    y = &xk[j*knsupc];
	    uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
	    /* U stores skyline segments: usub[i+jj] is the first nonzero
	       row (fnz) of column jj; fnz == iklrow means an empty column. */
	    for (jj = 0; jj < knsupc; ++jj) {
		fnz = usub[i + jj];
		if ( fnz < iklrow ) { /* Nonzero segment. */
		    /* AXPY: dest[fnz..iklrow) -= uval * y[jj]. */
		    for (irow = fnz; irow < iklrow; ++irow) {
			zz_mult(&temp, &uval[uptr], &y[jj]);
			z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
			      &temp);
			++uptr;
		    }
		    stat->ops[SOLVE] += 8 * (iklrow - fnz);
		}
	    } /* for jj ... */
	}

	if ( (--bmod[ik]) == 0 ) { /* Local accumulation done. */
	    gikcol = PCOL( gik, grid );
	    p = PNUM( myrow, gikcol, grid );
	    if ( iam != p ) {
		/* Not the diagonal process: ship lsum (with header) there. */
#ifdef ISEND_IRECV
		MPI_Isend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
			   SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm,
			   &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
		MPI_Bsend( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
			   SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#else
		MPI_Send( &lsum[il - LSUM_H], iknsupc * nrhs + LSUM_H,
			  SuperLU_MPI_DOUBLE_COMPLEX, p, LSUM, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
		printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
		       iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
	    } else { /* Diagonal process: X[i] += lsum[i]. */
		ii = X_BLK( ik );
		dest = &x[ii];
		RHS_ITERATE(j)
		    for (i = 0; i < iknsupc; ++i)
			z_add(&dest[i + j*iknsupc], &dest[i + j*iknsupc],
			      &lsum[i + il + j*iknsupc]);
		if ( !brecv[ik] ) { /* Becomes a leaf node. */
		    bmod[ik] = -1; /* Do not solve X[k] in the future. */
		    lk1 = LBj( gik, grid ); /* Local block number. */
		    lsub = Llu->Lrowind_bc_ptr[lk1];
		    lusup = Llu->Lnzval_bc_ptr[lk1];
		    nsupr = lsub[1];
		    /* Solve the upper-triangular diagonal block:
		       X[gik] := U(gik,gik)^{-1} * X[gik]. */
#ifdef _CRAY
		    CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
			  lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
		    ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
			   lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
		    ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
			   lusup, &nsupr, &x[ii], &iknsupc);
#endif
		    stat->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
			+ 10 * iknsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
		    printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
		    /*
		     * Send Xk to process column Pc[k].
		     */
		    for (p = 0; p < grid->nprow; ++p) {
			if ( bsendx_plist[lk1][p] != EMPTY ) {
			    pi = PNUM( p, gikcol, grid );
#ifdef ISEND_IRECV
			    MPI_Isend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
				       SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm,
				       &send_req[Llu->SolveMsgSent++] );
#else
#ifdef BSEND
			    MPI_Bsend( &x[ii - XK_H], iknsupc * nrhs + XK_H,
				       SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#else
			    MPI_Send( &x[ii - XK_H], iknsupc * nrhs + XK_H,
				      SuperLU_MPI_DOUBLE_COMPLEX, pi, Xk, grid->comm );
#endif
#endif
#if ( DEBUGlevel>=2 )
			    printf("(%2d) Sent X[%2.0f] to P %2d\n",
				   iam, x[ii-XK_H], pi);
#endif
			}
		    }
		    /*
		     * Perform local block modifications.
		     */
		    if ( Urbs[lk1] )
			zlsum_bmod(lsum, x, &x[ii], nrhs, gik, bmod, Urbs,
				   Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
				   send_req, stat);
		} /* if brecv[ik] == 0 */
	    }
	} /* if bmod[ik] == 0 */
    } /* for ub ... */
} /* zlSUM_BMOD */
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void zlsum_fmod_inv
/************************************************************************/
(
 doublecomplex *lsum,    /* Sum of local modifications. */
 doublecomplex *x,       /* X array (local) */
 doublecomplex *xk,      /* X[k]. */
 doublecomplex *rtemp,   /* Result of full matrix-vector multiply. */
 int   nrhs,             /* Number of right-hand sides. */
 int_t k,                /* The k-th component of X. */
 int_t *fmod,            /* Modification count for L-solve. */
 int_t *xsup,
 gridinfo_t *grid,
 LocalLU_t *Llu,
 SuperLUStat_t **stat,   /* Per-thread statistics array. */
 int_t *leaf_send,       /* Out: queue of blocks to forward (lk, or -lk-1 for lsum). */
 int_t *nleaf_send,      /* Out: number of entries queued in leaf_send. */
 int_t sizelsum,         /* Per-thread stride within lsum. */
 int_t sizertemp,        /* Per-thread stride within rtemp. */
 int_t recurlevel,       /* Recursion depth (incremented on recursive calls). */
 int_t maxsuper,         /* Threshold for splitting work across threads. */
 int   thread_id,
 int   num_thread
)
{
    /*
     * Threaded variant of zlsum_fmod: performs lsum[i] -= L_i,k * X[k]
     * for all L blocks below the diagonal in block column k.  Large
     * updates are split into Nchunk taskloop chunks; each thread
     * accumulates into its own slice of lsum (offset sizelsum*thread_id)
     * and the slices are reduced when a block's fmod count hits zero.
     * Blocks that become ready are not sent here directly; they are
     * queued in leaf_send for the caller to forward via the Bc/Rd trees.
     * When Llu->inv == 1 the diagonal solve uses a precomputed inverse
     * (GEMM with Linv) instead of TRSM.
     */
    doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0},malpha={-1.0, 0.0};
    doublecomplex *lusup, *lusup1;
    doublecomplex *dest;
    doublecomplex *Linv;/* Inverse of diagonal block */
    int    iam, iknsupc, myrow, krow, nbrow, nbrow1, nbrow_ref, nsupr, nsupr1, p, pi, idx_r,m;
    int_t  i, ii,jj, ik, il, ikcol, irow, j, lb, lk, rel, lib,lready;
    int_t  *lsub, *lsub1, nlb1, lptr1, luptr1,*lloc;
    int_t  *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
    int_t  *frecv = Llu->frecv;
    int_t  **fsendx_plist = Llu->fsendx_plist;
    int_t  luptr_tmp,luptr_tmp1,lptr1_tmp,maxrecvsz, idx_i, idx_v,idx_n, idx_l, fmod_tmp, lbstart,lbend,nn,Nchunk,nlb_loc,remainder;
    int thread_id1;
    flops_t ops_loc=0.0;
    MPI_Status status;
    int test_flag;
    yes_no_t done;
    BcTree  *LBtree_ptr = Llu->LBtree_ptr;
    RdTree  *LRtree_ptr = Llu->LRtree_ptr;
    int_t* idx_lsum,idx_lsum1;
    doublecomplex *rtemp_loc;
    int_t ldalsum;
    int_t nleaf_send_tmp;
    int_t lptr;      /* Starting position in lsub[*]. */
    int_t luptr;     /* Starting position in lusup[*]. */
    int_t iword = sizeof(int_t);
    int_t dword = sizeof (double);
    int_t aln_d,aln_i;
    /* Cache-line padding factors so per-block counters (fmod, leaf_send)
       live on distinct cache lines under concurrent atomic updates. */
    aln_d = ceil(CACHELINE/(double)dword);
    aln_i = ceil(CACHELINE/(double)iword);
    int   knsupc;    /* Size of supernode k. */
    int_t nlb;       /* Number of L blocks. */

    knsupc = SuperSize( k );
    lk = LBj( k, grid ); /* Local block number, column-wise. */
    lsub = Llu->Lrowind_bc_ptr[lk];
    nlb = lsub[0] - 1;   /* Off-diagonal L blocks in this block column. */
    ldalsum=Llu->ldalsum;
    rtemp_loc = &rtemp[sizertemp* thread_id];

    // #if ( PROFlevel>=1 )
    double t1, t2, t3, t4;
    float msg_vol = 0, msg_cnt = 0;
    // #endif

    if(nlb>0){
	iam = grid->iam;
	myrow = MYROW( iam, grid );
	lusup = Llu->Lnzval_bc_ptr[lk];
	lloc = Llu->Lindval_loc_bc_ptr[lk];
	nsupr = lsub[1];

	// printf("nlb: %5d lk: %5d\n",nlb,lk);
	// fflush(stdout);

	krow = PROW( k, grid );
	/* NOTE(review): lloc appears to pack three sub-arrays --
	   local block numbers, lsub offsets, lusup offsets -- whose base
	   offsets (idx_n/idx_i/idx_v) shift when this process row owns the
	   diagonal block; confirm against Lindval_loc_bc_ptr construction. */
	if(myrow==krow){
	    idx_n = 1;
	    idx_i = nlb+2;
	    idx_v = 2*nlb+3;
	    luptr_tmp = lloc[idx_v];
	    m = nsupr-knsupc; /* Rows below the diagonal block. */
	}else{
	    idx_n = 0;
	    idx_i = nlb;
	    idx_v = 2*nlb;
	    luptr_tmp = lloc[idx_v];
	    m = nsupr;
	}

	assert(m>0);

	if(m>8*maxsuper){ /* Large update: split across threads. */
	    // if(0){
	    // Nchunk=floor(num_thread/2.0)+1;
	    Nchunk=SUPERLU_MIN(num_thread,nlb);
	    // Nchunk=1;
	    nlb_loc = floor(((double)nlb)/Nchunk); /* Blocks per chunk. */
	    remainder = nlb % Nchunk;              /* First chunks get one extra. */

#ifdef _OPENMP
#pragma omp taskloop private (lptr1,luptr1,nlb1,thread_id1,lsub1,lusup1,nsupr1,Linv,nn,lbstart,lbend,luptr_tmp1,nbrow,lb,lptr1_tmp,rtemp_loc,nbrow_ref,lptr,nbrow1,ik,rel,lk,iknsupc,il,i,irow,fmod_tmp,ikcol,p,ii,jj,t1,t2,j,nleaf_send_tmp) untied nogroup
#endif
	    for (nn=0;nn<Nchunk;++nn){

#ifdef _OPENMP
		thread_id1 = omp_get_thread_num ();
#else
		thread_id1 = 0;
#endif
		rtemp_loc = &rtemp[sizertemp* thread_id1];

		/* [lbstart, lbend) is this chunk's block range. */
		if(nn<remainder){
		    lbstart = nn*(nlb_loc+1);
		    lbend = (nn+1)*(nlb_loc+1);
		}else{
		    lbstart = remainder+nn*nlb_loc;
		    lbend = remainder + (nn+1)*nlb_loc;
		}

		if(lbstart<lbend){

#if ( PROFlevel>=1 )
		    TIC(t1);
#endif
		    /* One GEMM covers all rows of this chunk's contiguous blocks. */
		    luptr_tmp1 = lloc[lbstart+idx_v];
		    nbrow=0;
		    for (lb = lbstart; lb < lbend; ++lb){
			lptr1_tmp = lloc[lb+idx_i];
			nbrow += lsub[lptr1_tmp+1];
		    }

#ifdef _CRAY
		    CGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
			   &alpha, &lusup[luptr_tmp1], &nsupr, xk,
			   &knsupc, &beta, rtemp_loc, &nbrow );
#elif defined (USE_VENDOR_BLAS)
		    zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
			    &alpha, &lusup[luptr_tmp1], &nsupr, xk,
			    &knsupc, &beta, rtemp_loc, &nbrow, 1, 1 );
#else
		    zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
			    &alpha, &lusup[luptr_tmp1], &nsupr, xk,
			    &knsupc, &beta, rtemp_loc, &nbrow );
#endif

		    /* Scatter-subtract into this thread's private lsum slice. */
		    nbrow_ref=0;
		    for (lb = lbstart; lb < lbend; ++lb){
			lptr1_tmp = lloc[lb+idx_i];
			lptr= lptr1_tmp+2;
			nbrow1 = lsub[lptr1_tmp+1];
			ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
			rel = xsup[ik]; /* Global row index of block ik. */

			lk = LBi( ik, grid ); /* Local block number, row-wise. */

			iknsupc = SuperSize( ik );
			il = LSUM_BLK( lk );

			RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
			    for (i = 0; i < nbrow1; ++i) {
				irow = lsub[lptr+i] - rel; /* Relative row. */
				z_sub(&lsum[il+irow + j*iknsupc+sizelsum*thread_id1],
				      &lsum[il+irow + j*iknsupc+sizelsum*thread_id1],
				      &rtemp_loc[nbrow_ref+i + j*nbrow]);
			    }
			nbrow_ref+=nbrow1;
		    }

#if ( PROFlevel>=1 )
		    TOC(t2, t1);
		    stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif

		    for (lb=lbstart;lb<lbend;lb++){
			lk = lloc[lb+idx_n];
			/* Atomically decrement the modification count;
			   exactly one thread observes zero. */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
			fmod_tmp=--fmod[lk*aln_i];

			if ( fmod_tmp==0 ) { /* Local accumulation done. */

			    lptr1_tmp = lloc[lb+idx_i];

			    ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
			    lk = LBi( ik, grid ); /* Local block number, row-wise. */

			    iknsupc = SuperSize( ik );
			    il = LSUM_BLK( lk );

			    ikcol = PCOL( ik, grid );
			    p = PNUM( myrow, ikcol, grid );
			    if ( iam != p ) {
				/* Reduce the per-thread lsum slices into slice 0,
				   then queue the block for reduction-tree forwarding
				   (negative encoding -lk-1 marks an lsum message). */
				for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
				    for (jj=0;jj<iknsupc*nrhs;jj++)
					z_add(&lsum[il + jj ],
					      &lsum[il + jj ],
					      &lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
				nleaf_send_tmp = ++nleaf_send[0];
				leaf_send[(nleaf_send_tmp-1)*aln_i] = -lk-1;
				// RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],'z');

			    } else { /* Diagonal process: X[i] += lsum[i]. */

#if ( PROFlevel>=1 )
				TIC(t1);
#endif
				/* Reduce per-thread slices, then fold into X. */
				for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
				    for (jj=0;jj<iknsupc*nrhs;jj++)
					z_add(&lsum[il + jj ],
					      &lsum[il + jj ],
					      &lsum[il + jj + ii*sizelsum]);

				ii = X_BLK( lk );
				RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
				    for (i = 0; i < iknsupc; ++i)
					z_add(&x[i + ii + j*iknsupc],
					      &x[i + ii + j*iknsupc],
					      &lsum[i + il + j*iknsupc] );

				// fmod[lk] = -1; /* Do not solve X[k] in the future. */
				lk = LBj( ik, grid );/* Local block number, column-wise. */
				lsub1 = Llu->Lrowind_bc_ptr[lk];
				lusup1 = Llu->Lnzval_bc_ptr[lk];
				nsupr1 = lsub1[1];

				if(Llu->inv == 1){
				    /* Apply precomputed inverse of the diagonal
				       block: X := Linv * X (GEMM instead of TRSM). */
				    Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
				    CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
					   &alpha, Linv, &iknsupc, &x[ii],
					   &iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
				    zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
					    &alpha, Linv, &iknsupc, &x[ii],
					    &iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
				    zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
					    &alpha, Linv, &iknsupc, &x[ii],
					    &iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
				    for (i=0 ; i<iknsupc*nrhs ; i++){
					z_copy(&x[ii+i],&rtemp_loc[i]);
				    }
				}else{
				    /* Unit-lower triangular solve on the diagonal block. */
#ifdef _CRAY
				    CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
					  lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
				    ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
					   lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
				    ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
					   lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
				}

				// for (i=0 ; i<iknsupc*nrhs ; i++){
				// printf("x_lsum: %f %f\n",x[ii+i].r,x[ii+i].i);
				// fflush(stdout);
				// }

#if ( PROFlevel>=1 )
				TOC(t2, t1);
				stat[thread_id1]->utime[SOL_TRSM] += t2;
#endif

				/* NOTE(review): the division term uses knsupc here,
				   whereas zlsum_bmod counts with its own supernode
				   size -- confirm intended operand. */
				stat[thread_id1]->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
				    + 10 * knsupc * nrhs; /* complex division */

#if ( DEBUGlevel>=2 )
				printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif

				/*
				 * Send Xk to process column Pc[k].
				 */
				if(LBtree_ptr[lk]!=NULL){
				    /* Queue X[ik] for broadcast-tree forwarding
				       (non-negative entry marks an X message). */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
				    nleaf_send_tmp = ++nleaf_send[0];
				    leaf_send[(nleaf_send_tmp-1)*aln_i] = lk;
				}

				/*
				 * Perform local block modifications.
				 */

				// #ifdef _OPENMP
				// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1,thread_id1) untied priority(1)
				// #endif
				{
				    zlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, ik,
						   fmod, xsup,
						   grid, Llu, stat, leaf_send, nleaf_send ,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id1,num_thread);
				}

				// } /* if frecv[lk] == 0 */
			    } /* if iam == p */
			} /* if fmod[lk] == 0 */
		    }
		}
	    }
	}else{ /* Small update: single-threaded path, one GEMM for all blocks. */

#if ( PROFlevel>=1 )
	    TIC(t1);
#endif

#ifdef _CRAY
	    CGEMM( ftcs2, ftcs2, &m, &nrhs, &knsupc,
		   &alpha, &lusup[luptr_tmp], &nsupr, xk,
		   &knsupc, &beta, rtemp_loc, &m );
#elif defined (USE_VENDOR_BLAS)
	    zgemm_( "N", "N", &m, &nrhs, &knsupc,
		    &alpha, &lusup[luptr_tmp], &nsupr, xk,
		    &knsupc, &beta, rtemp_loc, &m, 1, 1 );
#else
	    zgemm_( "N", "N", &m, &nrhs, &knsupc,
		    &alpha, &lusup[luptr_tmp], &nsupr, xk,
		    &knsupc, &beta, rtemp_loc, &m );
#endif

	    nbrow=0;
	    for (lb = 0; lb < nlb; ++lb){
		lptr1_tmp = lloc[lb+idx_i];
		nbrow += lsub[lptr1_tmp+1];
	    }
	    /* Scatter-subtract into this thread's private lsum slice. */
	    nbrow_ref=0;
	    for (lb = 0; lb < nlb; ++lb){
		lptr1_tmp = lloc[lb+idx_i];
		lptr= lptr1_tmp+2;
		nbrow1 = lsub[lptr1_tmp+1];
		ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
		rel = xsup[ik]; /* Global row index of block ik. */

		lk = LBi( ik, grid ); /* Local block number, row-wise. */

		iknsupc = SuperSize( ik );
		il = LSUM_BLK( lk );

		RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
		    for (i = 0; i < nbrow1; ++i) {
			irow = lsub[lptr+i] - rel; /* Relative row. */
			z_sub(&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
			      &lsum[il+irow + j*iknsupc+sizelsum*thread_id],
			      &rtemp_loc[nbrow_ref+i + j*nbrow]);
		    }
		nbrow_ref+=nbrow1;
	    }

	    // TOC(t3, t1);
#if ( PROFlevel>=1 )
	    TOC(t2, t1);
	    stat[thread_id]->utime[SOL_GEMM] += t2;
#endif

	    for (lb=0;lb<nlb;lb++){
		lk = lloc[lb+idx_n];
		/* Atomically decrement the modification count;
		   exactly one thread observes zero. */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
		fmod_tmp=--fmod[lk*aln_i];

		if ( fmod_tmp==0 ) { /* Local accumulation done. */

		    lptr1_tmp = lloc[lb+idx_i];

		    ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
		    lk = LBi( ik, grid ); /* Local block number, row-wise. */

		    iknsupc = SuperSize( ik );
		    il = LSUM_BLK( lk );
		    ikcol = PCOL( ik, grid );
		    p = PNUM( myrow, ikcol, grid );
		    if ( iam != p ) {
			/* Reduce per-thread slices, then queue for reduction tree. */
			for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
			    for (jj=0;jj<iknsupc*nrhs;jj++)
				z_add(&lsum[il + jj ],
				      &lsum[il + jj ],
				      &lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
			nleaf_send_tmp = ++nleaf_send[0];
			leaf_send[(nleaf_send_tmp-1)*aln_i] = -lk-1;

		    } else { /* Diagonal process: X[i] += lsum[i]. */

#if ( PROFlevel>=1 )
			TIC(t1);
#endif
			/* Reduce per-thread slices, then fold into X. */
			for (ii=1;ii<num_thread;ii++)
#ifdef _OPENMP
#pragma omp simd
#endif
			    for (jj=0;jj<iknsupc*nrhs;jj++)
				z_add(&lsum[il + jj ],
				      &lsum[il + jj ],
				      &lsum[il + jj + ii*sizelsum]);

			ii = X_BLK( lk );
			RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
			    for (i = 0; i < iknsupc; ++i)
				z_add(&x[i + ii + j*iknsupc],
				      &x[i + ii + j*iknsupc],
				      &lsum[i + il + j*iknsupc] );

			lk = LBj( ik, grid );/* Local block number, column-wise. */
			lsub1 = Llu->Lrowind_bc_ptr[lk];
			lusup1 = Llu->Lnzval_bc_ptr[lk];
			nsupr1 = lsub1[1];

			if(Llu->inv == 1){
			    /* Apply precomputed inverse of the diagonal block. */
			    Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
			    CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
				   &alpha, Linv, &iknsupc, &x[ii],
				   &iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
			    zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
				    &alpha, Linv, &iknsupc, &x[ii],
				    &iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
			    zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
				    &alpha, Linv, &iknsupc, &x[ii],
				    &iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
			    for (i=0 ; i<iknsupc*nrhs ; i++){
				z_copy(&x[ii+i],&rtemp_loc[i]);
			    }
			}else{
			    /* Unit-lower triangular solve on the diagonal block. */
#ifdef _CRAY
			    CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
				  lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
			    ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
				   lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
			    ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
				   lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
			}

			// for (i=0 ; i<iknsupc*nrhs ; i++){
			// printf("x_lsum: %f %f\n",x[ii+i].r,x[ii+i].i);
			// fflush(stdout);
			// }

#if ( PROFlevel>=1 )
			TOC(t2, t1);
			stat[thread_id]->utime[SOL_TRSM] += t2;
#endif

			/* NOTE(review): the division term uses knsupc here,
			   whereas zlsum_bmod counts with its own supernode
			   size -- confirm intended operand. */
			stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
			    + 10 * knsupc * nrhs; /* complex division */

#if ( DEBUGlevel>=2 )
			printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif

			/*
			 * Send Xk to process column Pc[k].
			 */
			if(LBtree_ptr[lk]!=NULL){
			    /* Queue X[ik] for broadcast-tree forwarding. */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
			    nleaf_send_tmp = ++nleaf_send[0];
			    // printf("nleaf_send_tmp %5d lk %5d\n",nleaf_send_tmp);
			    leaf_send[(nleaf_send_tmp-1)*aln_i] = lk;
			    // BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],'z');
			}

			/*
			 * Perform local block modifications.
			 */

			// #ifdef _OPENMP
			// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1) untied priority(1)
			// #endif

			{
			    zlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, ik,
					   fmod, xsup,
					   grid, Llu, stat, leaf_send, nleaf_send ,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id,num_thread);
			}

			// } /* if frecv[lk] == 0 */
		    } /* if iam == p */
		} /* if fmod[lk] == 0 */
	    }
	    // }
	}
	stat[thread_id]->ops[SOLVE] += 8 * m * nrhs * knsupc;
    } /* if nlb>0*/
} /* zLSUM_FMOD_INV */
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
* Perform local block modifications: lsum[i] -= L_i,k * X[k].
* </pre>
*/
void zlsum_fmod_inv_master
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local) */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int knsupc, /* Size of supernode k. */
int_t k, /* The k-th component of X. */
int_t *fmod, /* Modification count for L-solve. */
int_t nlb, /* Number of L blocks. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t sizelsum,
int_t sizertemp,
int_t recurlevel,
int_t maxsuper,
int thread_id,
int num_thread
)
{
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0},malpha={-1.0, 0.0};
doublecomplex *lusup, *lusup1;
doublecomplex *dest;
doublecomplex *Linv;/* Inverse of diagonal block */
int iam, iknsupc, myrow, krow, nbrow, nbrow1, nbrow_ref, nsupr, nsupr1, p, pi, idx_r;
int_t i, ii,jj, ik, il, ikcol, irow, j, lb, lk, rel, lib,lready;
int_t *lsub, *lsub1, nlb1, lptr1, luptr1,*lloc;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *frecv = Llu->frecv;
int_t **fsendx_plist = Llu->fsendx_plist;
int_t luptr_tmp,luptr_tmp1,lptr1_tmp,maxrecvsz, idx_i, idx_v,idx_n, idx_l, fmod_tmp, lbstart,lbend,nn,Nchunk,nlb_loc,remainder;
int thread_id1;
int m;
flops_t ops_loc=0.0;
MPI_Status status;
int test_flag;
yes_no_t done;
BcTree *LBtree_ptr = Llu->LBtree_ptr;
RdTree *LRtree_ptr = Llu->LRtree_ptr;
int_t* idx_lsum,idx_lsum1;
doublecomplex *rtemp_loc;
int_t ldalsum;
int_t nleaf_send_tmp;
int_t lptr; /* Starting position in lsub[*]. */
int_t luptr; /* Starting position in lusup[*]. */
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
ldalsum=Llu->ldalsum;
rtemp_loc = &rtemp[sizertemp* thread_id];
// #if ( PROFlevel>=1 )
double t1, t2, t3, t4;
float msg_vol = 0, msg_cnt = 0;
// #endif
if(nlb>0){
iam = grid->iam;
myrow = MYROW( iam, grid );
lk = LBj( k, grid ); /* Local block number, column-wise. */
// printf("ya1 %5d k %5d lk %5d\n",thread_id,k,lk);
// fflush(stdout);
lsub = Llu->Lrowind_bc_ptr[lk];
// printf("ya2 %5d k %5d lk %5d\n",thread_id,k,lk);
// fflush(stdout);
lusup = Llu->Lnzval_bc_ptr[lk];
lloc = Llu->Lindval_loc_bc_ptr[lk];
// idx_lsum = Llu->Lrowind_bc_2_lsum[lk];
nsupr = lsub[1];
// printf("nlb: %5d lk: %5d\n",nlb,lk);
// fflush(stdout);
krow = PROW( k, grid );
if(myrow==krow){
idx_n = 1;
idx_i = nlb+2;
idx_v = 2*nlb+3;
luptr_tmp = lloc[idx_v];
m = nsupr-knsupc;
}else{
idx_n = 0;
idx_i = nlb;
idx_v = 2*nlb;
luptr_tmp = lloc[idx_v];
m = nsupr;
}
assert(m>0);
if(m>4*maxsuper || nrhs>10){
// if(m<1){
// TIC(t1);
Nchunk=num_thread;
nlb_loc = floor(((double)nlb)/Nchunk);
remainder = nlb % Nchunk;
#ifdef _OPENMP
#pragma omp taskloop private (lptr1,luptr1,nlb1,thread_id1,lsub1,lusup1,nsupr1,Linv,nn,lbstart,lbend,luptr_tmp1,nbrow,lb,lptr1_tmp,rtemp_loc,nbrow_ref,lptr,nbrow1,ik,rel,lk,iknsupc,il,i,irow,fmod_tmp,ikcol,p,ii,jj,t1,t2,j) untied
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
if(nn<remainder){
lbstart = nn*(nlb_loc+1);
lbend = (nn+1)*(nlb_loc+1);
}else{
lbstart = remainder+nn*nlb_loc;
lbend = remainder + (nn+1)*nlb_loc;
}
if(lbstart<lbend){
#if ( PROFlevel>=1 )
TIC(t1);
#endif
luptr_tmp1 = lloc[lbstart+idx_v];
nbrow=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow, 1, 1 );
#else
zgemm_( "N", "N", &nbrow, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp1], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &nbrow );
#endif
nbrow_ref=0;
for (lb = lbstart; lb < lbend; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd lastprivate(irow)
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
z_sub(&lsum[il+irow + j*iknsupc],
&lsum[il+irow + j*iknsupc],
&rtemp_loc[nbrow_ref+i + j*nbrow]);
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
}
}
}else{
#if ( PROFlevel>=1 )
TIC(t1);
#endif
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m, 1, 1 );
#else
zgemm_( "N", "N", &m, &nrhs, &knsupc,
&alpha, &lusup[luptr_tmp], &nsupr, xk,
&knsupc, &beta, rtemp_loc, &m );
#endif
nbrow=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
nbrow += lsub[lptr1_tmp+1];
}
nbrow_ref=0;
for (lb = 0; lb < nlb; ++lb){
lptr1_tmp = lloc[lb+idx_i];
lptr= lptr1_tmp+2;
nbrow1 = lsub[lptr1_tmp+1];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
rel = xsup[ik]; /* Global row index of block ik. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd lastprivate(irow)
#endif
for (i = 0; i < nbrow1; ++i) {
irow = lsub[lptr+i] - rel; /* Relative row. */
z_sub(&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
&lsum[il+irow + j*iknsupc+sizelsum*thread_id],
&rtemp_loc[nbrow_ref+i + j*nbrow]);
}
nbrow_ref+=nbrow1;
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
}
// TOC(t3, t1);
rtemp_loc = &rtemp[sizertemp* thread_id];
for (lb=0;lb<nlb;lb++){
lk = lloc[lb+idx_n];
// #ifdef _OPENMP
// #pragma omp atomic capture
// #endif
fmod_tmp=--fmod[lk*aln_i];
if ( fmod_tmp==0 ) { /* Local accumulation done. */
// --fmod[lk];
lptr1_tmp = lloc[lb+idx_i];
// luptr_tmp = lloc[lb+idx_v];
ik = lsub[lptr1_tmp]; /* Global block number, row-wise. */
lk = LBi( ik, grid ); /* Local block number, row-wise. */
iknsupc = SuperSize( ik );
il = LSUM_BLK( lk );
// nbrow = lsub[lptr1_tmp+1];
ikcol = PCOL( ik, grid );
p = PNUM( myrow, ikcol, grid );
if ( iam != p ) {
// if(frecv[lk]==0){
// fmod[lk] = -1;
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(LRtree_ptr[lk],'z')*nrhs+LSUM_H,'z');
// }
} else { /* Diagonal process: X[i] += lsum[i]. */
// if ( frecv[lk]==0 ) { /* Becomes a leaf node. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( lk );
// for (jj=0;jj<num_thread;jj++)
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&x[i + ii + j*iknsupc],
&x[i + ii + j*iknsupc],
&lsum[i + il + j*iknsupc] );
// fmod[lk] = -1; /* Do not solve X[k] in the future. */
lk = LBj( ik, grid );/* Local block number, column-wise. */
lsub1 = Llu->Lrowind_bc_ptr[lk];
lusup1 = Llu->Lnzval_bc_ptr[lk];
nsupr1 = lsub1[1];
if(Llu->inv == 1){
Linv = Llu->Linv_bc_ptr[lk];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Linv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "L", "N", "U", &iknsupc, &nrhs, &alpha,
lusup1, &nsupr1, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_usum: %f %f\n",x[ii+i].r,x[ii+i].i);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc - 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, ik);
#endif
/*
* Send Xk to process column Pc[k].
*/
if(LBtree_ptr[lk]!=NULL)
BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'z')*nrhs+XK_H,'z');
/*
* Perform local block modifications.
*/
// #ifdef _OPENMP
// #pragma omp task firstprivate (Llu,sizelsum,iknsupc,ii,ik,lsub1,x,rtemp,fmod,lsum,stat,nrhs,grid,xsup,recurlevel) private(lptr1,luptr1,nlb1,thread_id1) untied priority(1)
// #endif
{
nlb1 = lsub1[0] - 1;
zlsum_fmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, iknsupc, ik,
fmod, nlb1, xsup,
grid, Llu, stat,sizelsum,sizertemp,1+recurlevel,maxsuper,thread_id,num_thread);
}
// } /* if frecv[lk] == 0 */
} /* if iam == p */
} /* if fmod[lk] == 0 */
}
// }
stat[thread_id]->ops[SOLVE] += 8 * m * nrhs * knsupc;
} /* if nlb>0*/
} /* zLSUM_FMOD_INV */
/************************************************************************/
void zlsum_bmod_inv
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local). */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t* root_send,
int_t* nroot_send,
int_t sizelsum,
int_t sizertemp,
int thread_id,
int num_thread
)
{
/*
* Purpose
* =======
* Perform local block modifications: lsum[i] -= U_i,k * X[k].
*/
/*
* Overview (review notes):
* For every U block in block column k this routine subtracts
* U_i,k * X[k] from the thread-local slice of lsum. When a block's
* dependency counter bmod[] drops to zero, the per-thread lsum
* copies are reduced, and either (a) the block is queued in
* root_send[] for later reduction-tree forwarding (off-diagonal
* process), or (b) the diagonal block is solved (GEMM with a
* precomputed inverse, or TRSM) and the routine recurses on the
* newly solved supernode. lsum is replicated per thread
* (stride sizelsum) to avoid write conflicts; rtemp likewise
* (stride sizertemp).
*/
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
doublecomplex *uval, *dest, *y;
int_t *lsub;
doublecomplex *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
BcTree *UBtree_ptr = Llu->UBtree_ptr;
RdTree *URtree_ptr = Llu->URtree_ptr;
MPI_Status status;
int test_flag;
int_t bmod_tmp;
int thread_id1;
doublecomplex *rtemp_loc;
int_t nroot_send_tmp;
doublecomplex *Uinv;/* Inverse of diagonal block */
doublecomplex temp;
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
int_t Nchunk, nub_loc,remainder,nn,lbstart,lbend;
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
/* Counter arrays (bmod, root_send) are strided by aln_i so each entry
* sits on its own cache line (CACHELINE bytes), avoiding false sharing. */
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
/* Heuristic: spawn taskloop parallelism only when the column is large
* (many nonzeros or many U blocks); otherwise run the serial branch. */
if(Llu->Unnz[lk]>knsupc*64 || nub>16){
// if(nub>num_thread){
// if(nub>16){
// // // // if(Urbs2[lk]>num_thread){
// if(Urbs2[lk]>0){
Nchunk=SUPERLU_MIN(num_thread,nub);
nub_loc = floor(((double)nub)/Nchunk);
remainder = nub % Nchunk;
// printf("Unnz: %5d nub: %5d knsupc: %5d\n",Llu->Unnz[lk],nub,knsupc);
#ifdef _OPENMP
#pragma omp taskloop firstprivate (stat) private (thread_id1,Uinv,nn,lbstart,lbend,ub,temp,rtemp_loc,ik,lk1,gik,gikcol,usub,uval,lsub,lusup,iknsupc,il,i,irow,bmod_tmp,p,ii,jj,t1,t2,j,ikfrow,iklrow,dest,y,uptr,fnz,nsupr) untied nogroup
#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
/* Partition the nub U blocks into Nchunk contiguous ranges; the first
* `remainder` chunks get one extra block. */
if(nn<remainder){
lbstart = nn*(nub_loc+1);
lbend = (nn+1)*(nub_loc+1);
}else{
lbstart = remainder+nn*nub_loc;
lbend = remainder + (nn+1)*nub_loc;
}
for (ub = lbstart; ub < lbend; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Sparse AXPY: for each column jj of the supernode, subtract
* uval*y[jj] over the nonzero segment [fnz, iklrow) into this
* thread's private copy of lsum. */
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id1];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id1]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
/* Atomically decrement the dependency count of block ik; the task
* observing zero performs the accumulation/solve below. */
#ifdef _OPENMP
#pragma omp atomic capture
#endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
/* Off-diagonal process: reduce the per-thread lsum copies into
* copy 0, then queue the block (encoded as -ik-1) in root_send
* for deferred reduction-tree forwarding by the caller. */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id1)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = -ik-1;
// RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],'z');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Reduce the per-thread lsum copies, fold them into x, then
* solve the diagonal block for this supernode. */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id1)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc],
&dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
/* Solve: multiply by the precomputed inverse of the diagonal
* block (GEMM) when Llu->inv == 1, otherwise a triangular
* solve (TRSM) with the upper-triangular factor. */
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("x_usum: %f %f\n",x[ii+i].r,x[ii+i].i);
// fflush(stdout);
// }
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id1]->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
/* Broadcast is deferred: record lk1 in root_send; the caller
* forwards along the broadcast tree later. */
if(UBtree_ptr[lk1]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = lk1;
// BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],'z');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] ){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
{
/* Recurse: propagate the newly solved X block upward. */
zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, root_send, nroot_send, sizelsum,sizertemp,thread_id1,num_thread);
}
}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
}
}
} else {
/* Serial branch (small column): same computation as above without
* taskloop chunking; uses the caller's thread_id slices. */
rtemp_loc = &rtemp[sizertemp* thread_id];
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
#if ( PROFlevel>=1 )
TIC(t1);
#endif
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
#ifdef _OPENMP
#pragma omp atomic capture
#endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = -ik-1;
// RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],'z');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc],
&dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
if(UBtree_ptr[lk1]!=NULL){
#ifdef _OPENMP
#pragma omp atomic capture
#endif
nroot_send_tmp = ++nroot_send[0];
root_send[(nroot_send_tmp-1)*aln_i] = lk1;
// BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],'z');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] )
// if(Urbs[lk1]>16){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
// zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
// Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
// stat, root_send, nroot_send, sizelsum,sizertemp);
//}else{
zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, root_send, nroot_send, sizelsum,sizertemp,thread_id,num_thread);
//}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
} /* for ub ... */
}
} /* zlSUM_BMOD_inv */
/************************************************************************/
void zlsum_bmod_inv_master
/************************************************************************/
(
doublecomplex *lsum, /* Sum of local modifications. */
doublecomplex *x, /* X array (local). */
doublecomplex *xk, /* X[k]. */
doublecomplex *rtemp, /* Result of full matrix-vector multiply. */
int nrhs, /* Number of right-hand sides. */
int_t k, /* The k-th component of X. */
int_t *bmod, /* Modification count for L-solve. */
int_t *Urbs, /* Number of row blocks in each block column of U.*/
Ucb_indptr_t **Ucb_indptr,/* Vertical linked list pointing to Uindex[].*/
int_t **Ucb_valptr, /* Vertical linked list pointing to Unzval[]. */
int_t *xsup,
gridinfo_t *grid,
LocalLU_t *Llu,
SuperLUStat_t **stat,
int_t sizelsum,
int_t sizertemp,
int thread_id,
int num_thread
)
{
/*
* Purpose
* =======
* Perform local block modifications: lsum[i] -= U_i,k * X[k].
*/
/*
* "Master" variant (review notes): unlike zlsum_bmod_inv, the sparse
* updates over all U blocks are done first (optionally chunked over
* threads), and only afterwards a single sequential pass decrements
* bmod[] and handles completed blocks. Completed blocks are forwarded
* immediately on the reduction/broadcast trees instead of being queued
* in a root_send list.
*/
doublecomplex alpha = {1.0, 0.0}, beta = {0.0, 0.0};
int iam, iknsupc, knsupc, myrow, nsupr, p, pi;
int_t fnz, gik, gikcol, i, ii, ik, ikfrow, iklrow, il, irow,
j, jj, lk, lk1, nub, ub, uptr;
int_t *usub;
doublecomplex *uval, *dest, *y;
int_t *lsub;
doublecomplex *lusup;
int_t *ilsum = Llu->ilsum; /* Starting position of each supernode in lsum. */
int_t *brecv = Llu->brecv;
int_t **bsendx_plist = Llu->bsendx_plist;
BcTree *UBtree_ptr = Llu->UBtree_ptr;
RdTree *URtree_ptr = Llu->URtree_ptr;
MPI_Status status;
int test_flag;
int_t bmod_tmp;
int thread_id1;
doublecomplex *rtemp_loc;
doublecomplex temp;
doublecomplex *Uinv;/* Inverse of diagonal block */
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
int_t Nchunk, nub_loc,remainder,nn,lbstart,lbend;
int_t iword = sizeof(int_t);
int_t dword = sizeof (double);
int_t aln_d,aln_i;
/* Cache-line stride for the bmod[] counters (see zlsum_bmod_inv). */
aln_d = ceil(CACHELINE/(double)dword);
aln_i = ceil(CACHELINE/(double)iword);
rtemp_loc = &rtemp[sizertemp* thread_id];
iam = grid->iam;
myrow = MYROW( iam, grid );
knsupc = SuperSize( k );
lk = LBj( k, grid ); /* Local block number, column-wise. */
nub = Urbs[lk]; /* Number of U blocks in block column lk */
// printf("Urbs2[lk] %5d lk %5d nub %5d\n",Urbs2[lk],lk,nub);
// fflush(stdout);
/* Chunked branch: split the nub blocks into num_thread ranges.
* NOTE(review): the taskloop pragma is commented out, so this loop
* currently runs sequentially on the calling thread. */
if(nub>num_thread){
// if(nub>0){
Nchunk=num_thread;
nub_loc = floor(((double)nub)/Nchunk);
remainder = nub % Nchunk;
//#ifdef _OPENMP
//#pragma omp taskloop firstprivate (stat) private (thread_id1,nn,lbstart,lbend,ub,temp,rtemp_loc,ik,gik,usub,uval,iknsupc,il,i,irow,jj,t1,t2,j,ikfrow,iklrow,dest,y,uptr,fnz) untied
//#endif
for (nn=0;nn<Nchunk;++nn){
#ifdef _OPENMP
thread_id1 = omp_get_thread_num ();
#else
thread_id1 = 0;
#endif
rtemp_loc = &rtemp[sizertemp* thread_id1];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* The first `remainder` chunks carry one extra block. */
if(nn<remainder){
lbstart = nn*(nub_loc+1);
lbend = (nn+1)*(nub_loc+1);
}else{
lbstart = remainder+nn*nub_loc;
lbend = remainder + (nn+1)*nub_loc;
}
for (ub = lbstart; ub < lbend; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
/* Sparse AXPY into this thread's private lsum slice. */
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id1];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id1]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id1]->utime[SOL_GEMM] += t2;
#endif
}
}else{
/* Small column: plain sequential sweep over all U blocks. */
rtemp_loc = &rtemp[sizertemp* thread_id];
#if ( PROFlevel>=1 )
TIC(t1);
#endif
for (ub = 0; ub < nub; ++ub) {
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
usub = Llu->Ufstnz_br_ptr[ik];
uval = Llu->Unzval_br_ptr[ik];
i = Ucb_indptr[lk][ub].indpos; /* Start of the block in usub[]. */
i += UB_DESCRIPTOR;
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
ikfrow = FstBlockC( gik );
iklrow = FstBlockC( gik+1 );
RHS_ITERATE(j) {
dest = &lsum[il + j*iknsupc+sizelsum*thread_id];
y = &xk[j*knsupc];
uptr = Ucb_valptr[lk][ub]; /* Start of the block in uval[]. */
for (jj = 0; jj < knsupc; ++jj) {
fnz = usub[i + jj];
if ( fnz < iklrow ) { /* Nonzero segment. */
/* AXPY */
#ifdef _OPENMP
#pragma omp simd
#endif
for (irow = fnz; irow < iklrow; ++irow)
{
zz_mult(&temp, &uval[uptr], &y[jj]);
z_sub(&dest[irow - ikfrow], &dest[irow - ikfrow],
&temp);
++uptr;
}
stat[thread_id]->ops[SOLVE] += 8 * (iklrow - fnz);
}
} /* for jj ... */
}
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_GEMM] += t2;
#endif
}
/* Second pass: decrement dependency counts and finish any block whose
* modifications are all in. NOTE(review): the atomic pragma here is
* commented out — this pass is expected to run on a single (master)
* thread. */
rtemp_loc = &rtemp[sizertemp* thread_id];
for (ub = 0; ub < nub; ++ub){
ik = Ucb_indptr[lk][ub].lbnum; /* Local block number, row-wise. */
il = LSUM_BLK( ik );
gik = ik * grid->nprow + myrow;/* Global block number, row-wise. */
iknsupc = SuperSize( gik );
// #ifdef _OPENMP
// #pragma omp atomic capture
// #endif
bmod_tmp=--bmod[ik*aln_i];
if ( bmod_tmp == 0 ) { /* Local accumulation done. */
gikcol = PCOL( gik, grid );
p = PNUM( myrow, gikcol, grid );
if ( iam != p ) {
/* Off-diagonal process: reduce per-thread lsum copies and
* forward immediately along the reduction tree. */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
RdTree_forwardMessageSimple(URtree_ptr[ik],&lsum[il - LSUM_H ],RdTree_GetMsgSize(URtree_ptr[ik],'z')*nrhs+LSUM_H,'z');
#if ( DEBUGlevel>=2 )
printf("(%2d) Sent LSUM[%2.0f], size %2d, to P %2d\n",
iam, lsum[il-LSUM_H], iknsupc*nrhs+LSUM_H, p);
#endif
} else { /* Diagonal process: X[i] += lsum[i]. */
#if ( PROFlevel>=1 )
TIC(t1);
#endif
/* Reduce per-thread copies, fold into x, then solve the
* diagonal block (inverse-GEMM or TRSM, as below). */
for (ii=1;ii<num_thread;ii++)
// if(ii!=thread_id)
#ifdef _OPENMP
#pragma omp simd
#endif
for (jj=0;jj<iknsupc*nrhs;jj++)
z_add(&lsum[il + jj ],
&lsum[il + jj ],
&lsum[il + jj + ii*sizelsum]);
ii = X_BLK( ik );
dest = &x[ii];
RHS_ITERATE(j)
#ifdef _OPENMP
#pragma omp simd
#endif
for (i = 0; i < iknsupc; ++i)
z_add(&dest[i + j*iknsupc],
&dest[i + j*iknsupc],
&lsum[i + il + j*iknsupc]);
// if ( !brecv[ik] ) { /* Becomes a leaf node. */
// bmod[ik] = -1; /* Do not solve X[k] in the future. */
lk1 = LBj( gik, grid ); /* Local block number. */
lsub = Llu->Lrowind_bc_ptr[lk1];
lusup = Llu->Lnzval_bc_ptr[lk1];
nsupr = lsub[1];
if(Llu->inv == 1){
Uinv = Llu->Uinv_bc_ptr[lk1];
#ifdef _CRAY
CGEMM( ftcs2, ftcs2, &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#elif defined (USE_VENDOR_BLAS)
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc, 1, 1 );
#else
zgemm_( "N", "N", &iknsupc, &nrhs, &iknsupc,
&alpha, Uinv, &iknsupc, &x[ii],
&iknsupc, &beta, rtemp_loc, &iknsupc );
#endif
#ifdef _OPENMP
#pragma omp simd
#endif
for (i=0 ; i<iknsupc*nrhs ; i++){
z_copy(&x[ii+i],&rtemp_loc[i]);
}
}else{
#ifdef _CRAY
CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#elif defined (USE_VENDOR_BLAS)
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc, 1, 1, 1, 1);
#else
ztrsm_("L", "U", "N", "N", &iknsupc, &nrhs, &alpha,
lusup, &nsupr, &x[ii], &iknsupc);
#endif
}
#if ( PROFlevel>=1 )
TOC(t2, t1);
stat[thread_id]->utime[SOL_TRSM] += t2;
#endif
stat[thread_id]->ops[SOLVE] += 4 * iknsupc * (iknsupc + 1) * nrhs
+ 10 * knsupc * nrhs; /* complex division */
#if ( DEBUGlevel>=2 )
printf("(%2d) Solve X[%2d]\n", iam, gik);
#endif
/*
* Send Xk to process column Pc[k].
*/
// for (i=0 ; i<iknsupc*nrhs ; i++){
// printf("xre: %f\n",x[ii+i]);
// fflush(stdout);
// }
/* Broadcast the solved block immediately (no deferred queue
* in the master variant). */
if(UBtree_ptr[lk1]!=NULL){
BcTree_forwardMessageSimple(UBtree_ptr[lk1],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk1],'z')*nrhs+XK_H,'z');
}
/*
* Perform local block modifications.
*/
if ( Urbs[lk1] ){
// #ifdef _OPENMP
// #pragma omp task firstprivate (Ucb_indptr,Ucb_valptr,Llu,sizelsum,ii,gik,x,rtemp,bmod,Urbs,lsum,stat,nrhs,grid,xsup) untied
// #endif
{
/* Recurse on the newly solved supernode. */
zlsum_bmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, gik, bmod, Urbs,
Ucb_indptr, Ucb_valptr, xsup, grid, Llu,
stat, sizelsum,sizertemp,thread_id,num_thread);
}
}
// } /* if brecv[ik] == 0 */
}
} /* if bmod[ik] == 0 */
}
} /* zlsum_bmod_inv_master */
|
ompfor-static.c | /*
* Static schedule
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * foo: run a statically scheduled OpenMP worksharing loop (chunk size 3)
 * and print which thread executes each iteration.
 *
 * Parameters: lower/upper/stride define the half-open iteration range
 * [lower, upper) stepped by stride.
 *
 * Fix: the original called omp_get_thread_num() unconditionally, but the
 * file only includes <omp.h> under #ifdef _OPENMP, so a build without
 * OpenMP failed to compile. The call is now guarded with a serial
 * fallback (thread 0).
 */
void foo(int lower, int upper, int stride)
{
    int i;
    /* Orphaned worksharing directive: binds to the enclosing parallel
       region of the caller; runs sequentially if there is none. */
#pragma omp for schedule(static,3)
    for (i = lower; i < upper; i += stride)
    {
#ifdef _OPENMP
        int tid = omp_get_thread_num();
#else
        int tid = 0; /* no OpenMP: everything runs on "thread" 0 */
#endif
        printf("Iteration %2d is carried out by thread %2d\n", i, tid);
    }
}
/*
 * Entry point: open a parallel region, report the team size once, then
 * let all threads share the iterations of foo().
 *
 * Fix: the original called omp_get_num_threads() unconditionally while
 * <omp.h> is only included under #ifdef _OPENMP, breaking non-OpenMP
 * builds. The query is now guarded; without OpenMP the pragmas are
 * ignored and the team size is 1.
 */
int main(void)
{
#pragma omp parallel
    {
#ifdef _OPENMP
        int nth = omp_get_num_threads();
#else
        int nth = 1; /* serial build: single thread */
#endif
#pragma omp single
        printf ("Using %d threads.\n", nth);
        foo(0, 10, 2);
    }
    return 0;
}
|
relu.c | #include <omp.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
/*
 * print: write the first `len` elements of `x` to stdout, two decimal
 * places each, separated by spaces and terminated by a newline.
 * Preconditions (asserted): x is non-NULL and len is positive.
 */
void print(const float *x, const int len)
{
    assert(x && len > 0);
    int pos = 0;
    while (pos < len)
    {
        printf("%.2f ", (float)x[pos]);
        ++pos;
    }
    printf("\n");
}
/*
 * relu: element-wise rectified linear unit, res[i] = max(x[i], 0).
 * `x` and `res` may alias (in-place use is supported by the caller).
 * Returns `res` for call chaining.
 * Preconditions (asserted): x, res non-NULL and len positive.
 *
 * Fixes: (1) the original queried omp_get_num_threads() OUTSIDE any
 * parallel region, which always returns 1, so the printed thread count
 * was wrong; omp_get_max_threads() reports the team size the parallel
 * loop will actually use. (2) The query is guarded by #ifdef _OPENMP so
 * the function also compiles without OpenMP support.
 */
float *relu(float *x, const int len, float *res)
{
    assert(x && len > 0 && res);
#ifdef _OPENMP
    int nthreads = omp_get_max_threads(); /* upcoming team size */
#else
    int nthreads = 1;
#endif
    printf("Number of threads = %d\n", nthreads);
#pragma omp parallel for
    for (int idx = 0; idx < len; ++idx)
    {
        res[idx] = x[idx] > 0 ? x[idx] : 0;
    }
    return res;
}
int main(int argc, char *argv[])
{
// init
const size_t n = 10;
float *x = calloc(10, sizeof(float));
memset(x, -1, n*sizeof(float)); // memset only can init value with 0 or -1
print(x, n);
// relu
x = relu(x, n, x);
print(x, n);
if(x) free(x); x = NULL;
return 0;
}
|
GB_convert_bitmap_worker.c | //------------------------------------------------------------------------------
// GB_convert_bitmap_worker: construct triplets or CSC/CSR from bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// TODO allow this function to do typecasting. Create 169 different versions
// for all 13x13 versions. Use this as part of Method 24, C=A assignment.
// Can also use typecasting for GB_Matrix_diag.
#include "GB.h"
#include "GB_partition.h"
GrB_Info GB_convert_bitmap_worker // extract CSC/CSR or triplets from bitmap
(
// outputs:
int64_t *restrict Ap, // vector pointers for CSC/CSR form
int64_t *restrict Ai, // indices for CSC/CSR or triplet form
int64_t *restrict Aj, // vector indices for triplet form
GB_void *restrict Ax_new, // values for CSC/CSR or triplet form
int64_t *anvec_nonempty, // # of non-empty vectors
// inputs: not modified
const GrB_Matrix A, // matrix to extract; not modified
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (GB_IS_BITMAP (A)) ;
ASSERT (Ap != NULL) ; // must be provided on input, size avdim+1
int64_t *restrict W = NULL ; size_t W_size = 0 ;
const int64_t avdim = A->vdim ;
const int64_t avlen = A->vlen ;
const size_t asize = A->type->size ;
//--------------------------------------------------------------------------
// count the entries in each vector
//--------------------------------------------------------------------------
const int8_t *restrict Ab = A->b ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (avlen*avdim, chunk, nthreads_max) ;
bool by_vector = (nthreads <= avdim) ;
if (by_vector)
{
//----------------------------------------------------------------------
// compute all vectors in parallel (no workspace)
//----------------------------------------------------------------------
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < avdim ; j++)
{
// ajnz = nnz (A (:,j))
int64_t ajnz = 0 ;
int64_t pA_start = j * avlen ;
for (int64_t i = 0 ; i < avlen ; i++)
{
// see if A(i,j) is present in the bitmap
int64_t p = i + pA_start ;
ajnz += Ab [p] ;
ASSERT (Ab [p] == 0 || Ab [p] == 1) ;
}
Ap [j] = ajnz ;
}
}
else
{
//----------------------------------------------------------------------
// compute blocks of rows in parallel
//----------------------------------------------------------------------
// allocate one row of W per thread, each row of length avdim
W = GB_MALLOC_WERK (nthreads * avdim, int64_t, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (taskid = 0 ; taskid < nthreads ; taskid++)
{
int64_t *restrict Wtask = W + taskid * avdim ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, avlen, taskid, nthreads) ;
for (int64_t j = 0 ; j < avdim ; j++)
{
// ajnz = nnz (A (istart:iend-1,j))
int64_t ajnz = 0 ;
int64_t pA_start = j * avlen ;
for (int64_t i = istart ; i < iend ; i++)
{
// see if A(i,j) is present in the bitmap
int64_t p = i + pA_start ;
ajnz += Ab [p] ;
ASSERT (Ab [p] == 0 || Ab [p] == 1) ;
}
Wtask [j] = ajnz ;
}
}
// cumulative sum to compute nnz(A(:,j)) for each vector j
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < avdim ; j++)
{
int64_t ajnz = 0 ;
for (int taskid = 0 ; taskid < nthreads ; taskid++)
{
int64_t *restrict Wtask = W + taskid * avdim ;
int64_t c = Wtask [j] ;
Wtask [j] = ajnz ;
ajnz += c ;
}
Ap [j] = ajnz ;
}
}
//--------------------------------------------------------------------------
// cumulative sum of Ap
//--------------------------------------------------------------------------
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
GB_cumsum (Ap, avdim, anvec_nonempty, nth, Context) ;
int64_t anz = Ap [avdim] ;
ASSERT (anz == A->nvals) ;
//--------------------------------------------------------------------------
// gather the pattern and values from the bitmap
//--------------------------------------------------------------------------
// TODO: add type-specific versions for built-in types
const GB_void *restrict Ax = (GB_void *) (A->x) ;
if (by_vector)
{
//----------------------------------------------------------------------
// construct all vectors in parallel (no workspace)
//----------------------------------------------------------------------
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < avdim ; j++)
{
// gather from the bitmap into the new A (:,j)
int64_t pnew = Ap [j] ;
int64_t pA_start = j * avlen ;
for (int64_t i = 0 ; i < avlen ; i++)
{
int64_t p = i + pA_start ;
if (Ab [p])
{
// A(i,j) is in the bitmap
if (Ai != NULL) Ai [pnew] = i ;
if (Aj != NULL) Aj [pnew] = j ;
if (Ax_new != NULL)
{
// Ax_new [pnew] = Ax [p])
memcpy (Ax_new +(pnew)*asize, Ax +(p)*asize, asize) ;
}
pnew++ ;
}
}
ASSERT (pnew == Ap [j+1]) ;
}
}
else
{
//----------------------------------------------------------------------
// compute blocks of rows in parallel
//----------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (taskid = 0 ; taskid < nthreads ; taskid++)
{
int64_t *restrict Wtask = W + taskid * avdim ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, avlen, taskid, nthreads) ;
for (int64_t j = 0 ; j < avdim ; j++)
{
// gather from the bitmap into the new A (:,j)
int64_t pnew = Ap [j] + Wtask [j] ;
int64_t pA_start = j * avlen ;
for (int64_t i = istart ; i < iend ; i++)
{
// see if A(i,j) is present in the bitmap
int64_t p = i + pA_start ;
if (Ab [p])
{
// A(i,j) is in the bitmap
if (Ai != NULL) Ai [pnew] = i ;
if (Aj != NULL) Aj [pnew] = j ;
if (Ax_new != NULL)
{
// Ax_new [pnew] = Ax [p] ;
memcpy (Ax_new +(pnew)*asize, Ax +(p)*asize, asize);
}
pnew++ ;
}
}
}
}
}
//--------------------------------------------------------------------------
// free workspace return result
//--------------------------------------------------------------------------
GB_FREE_WERK (&W, W_size) ;
return (GrB_SUCCESS) ;
}
|
convolution_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // Transform every 3x3 kernel into the 8x8 winograd63 domain (U = G * g * G^T,
    // stored transposed), then interleave the result into the pack4 layout
    // consumed by conv3x3s1_winograd64_pack4_sse.
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // rows of the winograd63 kernel-transform matrix G
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k_src = (const float*)kernel + (p * inch + q) * 9;
            float* k_dst = kernel_tm.channel(p).row(q);

            // the three rows of the 3x3 kernel
            const float* kr[3] = {k_src, k_src + 3, k_src + 6};

            // gk = G * g  (8x3 intermediate, transposed storage)
            float gk[8][3];
            for (int i = 0; i < 8; i++)
            {
                for (int r = 0; r < 3; r++)
                {
                    gk[i][r] = kr[r][0] * ktm[i][0] + kr[r][1] * ktm[i][1] + kr[r][2] * ktm[i][2];
                }
            }

            // U = gk * G^T  (8x8)
            for (int j = 0; j < 8; j++)
            {
                const float* grow = gk[j];
                for (int i = 0; i < 8; i++)
                {
                    k_dst[j * 8 + i] = grow[0] * ktm[i][0] + grow[1] * ktm[i][1] + grow[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        g00[0] = kernel_tm.channel(q + j).row(p + i)[k];
                        g00++;
                    }
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) stride-1 3x3 convolution for pack4 SSE layout.
// bottom_blob : input feature map (elempack 4, per the pack4 naming)
// top_blob    : output feature map; its w/h/c are read, data is written
// kernel_tm   : kernel pre-transformed by conv3x3s1_winograd64_transform_kernel_pack4_sse
// _bias       : optional per-output-channel bias; empty -> zero bias
// Pipeline: pad input -> input transform (8x8 tiles) -> tile permute ->
//           per-frequency GEMM -> output transform (6x6 tiles) -> cut padding.
// NOTE(review): assumes inch and outch are multiples of 4 -- confirm at caller.
static void conv3x3s1_winograd64_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
// number of 8x8 input tiles; each tile yields one 6x6 output tile
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
// per-tile staging buffer: row pass writes tmp, column pass reads it back
float tmp[8][8][4];
__m128 _v5_25 = _mm_set1_ps(5.25f);
__m128 _vm4_25 = _mm_set1_ps(-4.25f);
__m128 _vm1_25 = _mm_set1_ps(-1.25f);
__m128 _v0_25 = _mm_set1_ps(0.25f);
__m128 _vm2_5 = _mm_set1_ps(-2.5f);
__m128 _v0_5 = _mm_set1_ps(0.5f);
__m128 _v2 = _mm_set1_ps(2.f);
__m128 _v4 = _mm_set1_ps(4.f);
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
// tiles overlap by 2 pixels: start stride is 6, tile width is 8
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
// row-direction transform of the 8x8 tile
for (int m = 0; m < 8; m++)
{
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _r05 = _mm_load_ps(r0 + 4 * 5);
__m128 _r06 = _mm_load_ps(r0 + 4 * 6);
__m128 _r07 = _mm_load_ps(r0 + 4 * 7);
__m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06));
__m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[7][m], _tmp7m);
__m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06));
__m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05));
__m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b);
__m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
__m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06));
__m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5)));
__m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b);
__m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[4][m], _tmp4m);
__m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06);
__m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2)));
__m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b);
__m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b);
_mm_store_ps(tmp[5][m], _tmp5m);
_mm_store_ps(tmp[6][m], _tmp6m);
r0 += w * 4;
}
// column-direction transform; 8 output rows strided tiles*4 apart
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
for (int m = 0; m < 8; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp06 = _mm_load_ps(tmp[m][6]);
__m128 _tmp07 = _mm_load_ps(tmp[m][7]);
__m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06));
__m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01));
__m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06));
__m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05));
__m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b);
__m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b);
__m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06));
__m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5)));
__m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b);
__m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b);
__m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06);
__m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2)));
__m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b);
__m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b);
_mm_store_ps(r0_tm_0, _r0tm0);
_mm_store_ps(r0_tm_1, _r0tm1);
_mm_store_ps(r0_tm_2, _r0tm2);
_mm_store_ps(r0_tm_3, _r0tm3);
_mm_store_ps(r0_tm_4, _r0tm4);
_mm_store_ps(r0_tm_5, _r0tm5);
_mm_store_ps(r0_tm_6, _r0tm6);
_mm_store_ps(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 4 * 8;
r0_tm_1 += tiles * 4 * 8;
r0_tm_2 += tiles * 4 * 8;
r0_tm_3 += tiles * 4 * 8;
r0_tm_4 += tiles * 4 * 8;
r0_tm_5 += tiles * 4 * 8;
r0_tm_6 += tiles * 4 * 8;
r0_tm_7 += tiles * 4 * 8;
}
}
}
}
}
// release the padded copy early to cap peak workspace memory
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// repack transformed tiles into contiguous batches of 12/8/4/2/1
// so the GEMM below reads them sequentially
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x12
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
__m128 _r4 = _mm_load_ps(r0 + 4 * 4);
__m128 _r5 = _mm_load_ps(r0 + 4 * 5);
__m128 _r6 = _mm_load_ps(r0 + 4 * 6);
__m128 _r7 = _mm_load_ps(r0 + 4 * 7);
__m128 _r8 = _mm_load_ps(r0 + 4 * 8);
__m128 _r9 = _mm_load_ps(r0 + 4 * 9);
__m128 _ra = _mm_load_ps(r0 + 4 * 10);
__m128 _rb = _mm_load_ps(r0 + 4 * 11);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r4);
_mm_store_ps(tmpptr + 4 * 2, _r8);
_mm_store_ps(tmpptr + 4 * 3, _r1);
_mm_store_ps(tmpptr + 4 * 4, _r5);
_mm_store_ps(tmpptr + 4 * 5, _r9);
_mm_store_ps(tmpptr + 4 * 6, _r2);
_mm_store_ps(tmpptr + 4 * 7, _r6);
_mm_store_ps(tmpptr + 4 * 8, _ra);
_mm_store_ps(tmpptr + 4 * 9, _r3);
_mm_store_ps(tmpptr + 4 * 10, _r7);
_mm_store_ps(tmpptr + 4 * 11, _rb);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 48;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
__m128 _r4 = _mm_load_ps(r0 + 4 * 4);
__m128 _r5 = _mm_load_ps(r0 + 4 * 5);
__m128 _r6 = _mm_load_ps(r0 + 4 * 6);
__m128 _r7 = _mm_load_ps(r0 + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r4);
_mm_store_ps(tmpptr + 4 * 2, _r1);
_mm_store_ps(tmpptr + 4 * 3, _r5);
_mm_store_ps(tmpptr + 4 * 4, _r2);
_mm_store_ps(tmpptr + 4 * 5, _r6);
_mm_store_ps(tmpptr + 4 * 6, _r3);
_mm_store_ps(tmpptr + 4 * 7, _r7);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r1);
_mm_store_ps(tmpptr + 4 * 2, _r2);
_mm_store_ps(tmpptr + 4 * 3, _r3);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x2
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1);
__m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1);
_mm_store_ps(tmpptr, _r01_0);
_mm_store_ps(tmpptr + 4, _r01_1);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
__m128 _val = _mm_load_ps(r0);
_mm_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);
// GEMM: for each of the 64 frequency positions, accumulate over
// inch * 4 input scalars per tile, 12/8/4/2/1 tiles at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
__m128 _sum8 = _mm_setzero_ps();
__m128 _sum9 = _mm_setzero_ps();
__m128 _suma = _mm_setzero_ps();
__m128 _sumb = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _val4 = _mm_load1_ps(r0 + 4);
__m128 _val5 = _mm_load1_ps(r0 + 5);
__m128 _val6 = _mm_load1_ps(r0 + 6);
__m128 _val7 = _mm_load1_ps(r0 + 7);
__m128 _val8 = _mm_load1_ps(r0 + 8);
__m128 _val9 = _mm_load1_ps(r0 + 9);
__m128 _vala = _mm_load1_ps(r0 + 10);
__m128 _valb = _mm_load1_ps(r0 + 11);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
_sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9);
_suma = _mm_comp_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
_mm_store_ps(output0_tm + 4 * 2, _sum2);
_mm_store_ps(output0_tm + 4 * 3, _sum3);
_mm_store_ps(output0_tm + 4 * 4, _sum4);
_mm_store_ps(output0_tm + 4 * 5, _sum5);
_mm_store_ps(output0_tm + 4 * 6, _sum6);
_mm_store_ps(output0_tm + 4 * 7, _sum7);
_mm_store_ps(output0_tm + 4 * 8, _sum8);
_mm_store_ps(output0_tm + 4 * 9, _sum9);
_mm_store_ps(output0_tm + 4 * 10, _suma);
_mm_store_ps(output0_tm + 4 * 11, _sumb);
output0_tm += 4 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _val4 = _mm_load1_ps(r0 + 4);
__m128 _val5 = _mm_load1_ps(r0 + 5);
__m128 _val6 = _mm_load1_ps(r0 + 6);
__m128 _val7 = _mm_load1_ps(r0 + 7);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
_mm_store_ps(output0_tm + 4 * 2, _sum2);
_mm_store_ps(output0_tm + 4 * 3, _sum3);
_mm_store_ps(output0_tm + 4 * 4, _sum4);
_mm_store_ps(output0_tm + 4 * 5, _sum5);
_mm_store_ps(output0_tm + 4 * 6, _sum6);
_mm_store_ps(output0_tm + 4 * 7, _sum7);
output0_tm += 4 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _val3 = _mm_load1_ps(r0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
_mm_store_ps(output0_tm + 4 * 2, _sum2);
_mm_store_ps(output0_tm + 4 * 3, _sum3);
output0_tm += 4 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
output0_tm += 4 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
r0 += 1;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
// no rounding padding was added; write straight into top_blob
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
// per-tile staging buffer for the two-pass (rows then columns) transform
float tmp[6][8][4];
__m128 _v32 = _mm_set1_ps(32.f);
__m128 _v16 = _mm_set1_ps(16.f);
__m128 _v8 = _mm_set1_ps(8.f);
__m128 _v4 = _mm_set1_ps(4.f);
__m128 _v2 = _mm_set1_ps(2.f);
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;
float* output0 = out0.row<float>(i * 6) + (j * 6) * 4;
// TODO msa optimize
for (int m = 0; m < 8; m++)
{
__m128 _out0tm0 = _mm_load_ps(output0_tm_0);
__m128 _out0tm1 = _mm_load_ps(output0_tm_1);
__m128 _out0tm2 = _mm_load_ps(output0_tm_2);
__m128 _out0tm3 = _mm_load_ps(output0_tm_3);
__m128 _out0tm4 = _mm_load_ps(output0_tm_4);
__m128 _out0tm5 = _mm_load_ps(output0_tm_5);
__m128 _out0tm6 = _mm_load_ps(output0_tm_6);
__m128 _out0tm7 = _mm_load_ps(output0_tm_7);
__m128 _tmp024a = _mm_add_ps(_out0tm1, _out0tm2);
__m128 _tmp135a = _mm_sub_ps(_out0tm1, _out0tm2);
__m128 _tmp024b = _mm_add_ps(_out0tm3, _out0tm4);
__m128 _tmp135b = _mm_sub_ps(_out0tm3, _out0tm4);
__m128 _tmp024c = _mm_add_ps(_out0tm5, _out0tm6);
__m128 _tmp135c = _mm_sub_ps(_out0tm5, _out0tm6);
__m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b));
__m128 _tmp2m = _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a));
__m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[4][m], _tmp4m);
__m128 _tmp1m = _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a));
__m128 _tmp3m = _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a));
__m128 _tmp5m = _mm_add_ps(_mm_add_ps(_out0tm7, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c));
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[5][m], _tmp5m);
output0_tm_0 += tiles * 4 * 8;
output0_tm_1 += tiles * 4 * 8;
output0_tm_2 += tiles * 4 * 8;
output0_tm_3 += tiles * 4 * 8;
output0_tm_4 += tiles * 4 * 8;
output0_tm_5 += tiles * 4 * 8;
output0_tm_6 += tiles * 4 * 8;
output0_tm_7 += tiles * 4 * 8;
}
// column pass: adds the bias and writes the final 6x6 output tile
for (int m = 0; m < 6; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp06 = _mm_load_ps(tmp[m][6]);
__m128 _tmp07 = _mm_load_ps(tmp[m][7]);
__m128 _tmp024a = _mm_add_ps(_tmp01, _tmp02);
__m128 _tmp135a = _mm_sub_ps(_tmp01, _tmp02);
__m128 _tmp024b = _mm_add_ps(_tmp03, _tmp04);
__m128 _tmp135b = _mm_sub_ps(_tmp03, _tmp04);
__m128 _tmp024c = _mm_add_ps(_tmp05, _tmp06);
__m128 _tmp135c = _mm_sub_ps(_tmp05, _tmp06);
__m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp024a), _mm_comp_fmadd_ps(_v32, _tmp024c, _tmp024b)));
__m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp024c, _mm_comp_fmadd_ps(_v4, _tmp024b, _tmp024a)));
__m128 _out04 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp024c, _mm_comp_fmadd_ps(_v16, _tmp024b, _tmp024a)));
_mm_store_ps(output0, _out00);
_mm_store_ps(output0 + 4 * 2, _out02);
_mm_store_ps(output0 + 4 * 4, _out04);
__m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v16, _tmp135c, _mm_comp_fmadd_ps(_v2, _tmp135b, _tmp135a)));
__m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp135c, _mm_comp_fmadd_ps(_v8, _tmp135b, _tmp135a)));
__m128 _out05 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp07, _tmp135a), _mm_comp_fmadd_ps(_v32, _tmp135b, _tmp135c)));
_mm_store_ps(output0 + 4, _out01);
_mm_store_ps(output0 + 4 * 3, _out03);
_mm_store_ps(output0 + 4 * 5, _out05);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // Transform every 3x3 kernel into the 6x6 winograd42 domain (U = G * g * G^T),
    // then interleave the result into the pack4 layout used by the
    // winograd42 pack4 convolution path.
    Mat kernel_tm(6 * 6, inch, outch);

    // rows of the winograd42 kernel-transform matrix G
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k_src = (const float*)kernel + (p * inch + q) * 9;
            float* k_dst = kernel_tm.channel(p).row(q);

            // the three rows of the 3x3 kernel
            const float* kr[3] = {k_src, k_src + 3, k_src + 6};

            // gk = G * g  (6x3 intermediate)
            float gk[6][3];
            for (int i = 0; i < 6; i++)
            {
                for (int r = 0; r < 3; r++)
                {
                    gk[i][r] = kr[r][0] * ktm[i][0] + kr[r][1] * ktm[i][1] + kr[r][2] * ktm[i][2];
                }
            }

            // U = gk * G^T  (6x6)
            for (int j = 0; j < 6; j++)
            {
                const float* grow = gk[j];
                for (int i = 0; i < 6; i++)
                {
                    k_dst[j * 6 + i] = grow[0] * ktm[i][0] + grow[1] * ktm[i][1] + grow[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        g00[0] = kernel_tm.channel(q + j).row(p + i)[k];
                        g00++;
                    }
                }
            }
        }
    }
}
static void conv3x3s1_winograd42_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[6][6][4];
__m128 _vm5 = _mm_set1_ps(-5.f);
__m128 _vm4 = _mm_set1_ps(-4.f);
__m128 _v4 = _mm_set1_ps(4.f);
__m128 _vm2 = _mm_set1_ps(-2.f);
__m128 _v2 = _mm_set1_ps(2.f);
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _r05 = _mm_load_ps(r0 + 4 * 5);
__m128 _tmp0m = _mm_comp_fmadd_ps(_vm5, _r02, _mm_comp_fmadd_ps(_v4, _r00, _r04));
__m128 _tmp1m = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_r01, _r02), _mm_add_ps(_r04, _r03));
__m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_r01, _r02), _mm_sub_ps(_r04, _r03));
__m128 _tmp3m = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02));
__m128 _tmp4m = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_r01, _r03), _mm_sub_ps(_r04, _r02));
__m128 _tmp5m = _mm_comp_fmadd_ps(_vm5, _r03, _mm_comp_fmadd_ps(_v4, _r01, _r05));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[4][m], _tmp4m);
_mm_store_ps(tmp[5][m], _tmp5m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
for (int m = 0; m < 6; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _r0tm0 = _mm_comp_fmadd_ps(_vm5, _tmp02, _mm_comp_fmadd_ps(_v4, _tmp00, _tmp04));
__m128 _r0tm1 = _mm_comp_fmadd_ps(_vm4, _mm_add_ps(_tmp01, _tmp02), _mm_add_ps(_tmp04, _tmp03));
__m128 _r0tm2 = _mm_comp_fmadd_ps(_v4, _mm_sub_ps(_tmp01, _tmp02), _mm_sub_ps(_tmp04, _tmp03));
__m128 _r0tm3 = _mm_comp_fmadd_ps(_vm2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02));
__m128 _r0tm4 = _mm_comp_fmadd_ps(_v2, _mm_sub_ps(_tmp01, _tmp03), _mm_sub_ps(_tmp04, _tmp02));
__m128 _r0tm5 = _mm_comp_fmadd_ps(_vm5, _tmp03, _mm_comp_fmadd_ps(_v4, _tmp01, _tmp05));
_mm_store_ps(r0_tm_0, _r0tm0);
_mm_store_ps(r0_tm_1, _r0tm1);
_mm_store_ps(r0_tm_2, _r0tm2);
_mm_store_ps(r0_tm_3, _r0tm3);
_mm_store_ps(r0_tm_4, _r0tm4);
_mm_store_ps(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 4 * 6;
r0_tm_1 += tiles * 4 * 6;
r0_tm_2 += tiles * 4 * 6;
r0_tm_3 += tiles * 4 * 6;
r0_tm_4 += tiles * 4 * 6;
r0_tm_5 += tiles * 4 * 6;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x12
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
__m128 _r4 = _mm_load_ps(r0 + 4 * 4);
__m128 _r5 = _mm_load_ps(r0 + 4 * 5);
__m128 _r6 = _mm_load_ps(r0 + 4 * 6);
__m128 _r7 = _mm_load_ps(r0 + 4 * 7);
__m128 _r8 = _mm_load_ps(r0 + 4 * 8);
__m128 _r9 = _mm_load_ps(r0 + 4 * 9);
__m128 _ra = _mm_load_ps(r0 + 4 * 10);
__m128 _rb = _mm_load_ps(r0 + 4 * 11);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r4);
_mm_store_ps(tmpptr + 4 * 2, _r8);
_mm_store_ps(tmpptr + 4 * 3, _r1);
_mm_store_ps(tmpptr + 4 * 4, _r5);
_mm_store_ps(tmpptr + 4 * 5, _r9);
_mm_store_ps(tmpptr + 4 * 6, _r2);
_mm_store_ps(tmpptr + 4 * 7, _r6);
_mm_store_ps(tmpptr + 4 * 8, _ra);
_mm_store_ps(tmpptr + 4 * 9, _r3);
_mm_store_ps(tmpptr + 4 * 10, _r7);
_mm_store_ps(tmpptr + 4 * 11, _rb);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 48;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
__m128 _r4 = _mm_load_ps(r0 + 4 * 4);
__m128 _r5 = _mm_load_ps(r0 + 4 * 5);
__m128 _r6 = _mm_load_ps(r0 + 4 * 6);
__m128 _r7 = _mm_load_ps(r0 + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r4);
_mm_store_ps(tmpptr + 4 * 2, _r1);
_mm_store_ps(tmpptr + 4 * 3, _r5);
_mm_store_ps(tmpptr + 4 * 4, _r2);
_mm_store_ps(tmpptr + 4 * 5, _r6);
_mm_store_ps(tmpptr + 4 * 6, _r3);
_mm_store_ps(tmpptr + 4 * 7, _r7);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r1);
_mm_store_ps(tmpptr + 4 * 2, _r2);
_mm_store_ps(tmpptr + 4 * 3, _r3);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x2
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1);
__m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1);
_mm_store_ps(tmpptr, _r01_0);
_mm_store_ps(tmpptr + 4, _r01_1);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
__m128 _val = _mm_load_ps(r0);
_mm_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
__m128 _sum8 = _mm_setzero_ps();
__m128 _sum9 = _mm_setzero_ps();
__m128 _suma = _mm_setzero_ps();
__m128 _sumb = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _val4 = _mm_load1_ps(r0 + 4);
__m128 _val5 = _mm_load1_ps(r0 + 5);
__m128 _val6 = _mm_load1_ps(r0 + 6);
__m128 _val7 = _mm_load1_ps(r0 + 7);
__m128 _val8 = _mm_load1_ps(r0 + 8);
__m128 _val9 = _mm_load1_ps(r0 + 9);
__m128 _vala = _mm_load1_ps(r0 + 10);
__m128 _valb = _mm_load1_ps(r0 + 11);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
_sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9);
_suma = _mm_comp_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
_mm_store_ps(output0_tm + 4 * 2, _sum2);
_mm_store_ps(output0_tm + 4 * 3, _sum3);
_mm_store_ps(output0_tm + 4 * 4, _sum4);
_mm_store_ps(output0_tm + 4 * 5, _sum5);
_mm_store_ps(output0_tm + 4 * 6, _sum6);
_mm_store_ps(output0_tm + 4 * 7, _sum7);
_mm_store_ps(output0_tm + 4 * 8, _sum8);
_mm_store_ps(output0_tm + 4 * 9, _sum9);
_mm_store_ps(output0_tm + 4 * 10, _suma);
_mm_store_ps(output0_tm + 4 * 11, _sumb);
output0_tm += 4 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _val4 = _mm_load1_ps(r0 + 4);
__m128 _val5 = _mm_load1_ps(r0 + 5);
__m128 _val6 = _mm_load1_ps(r0 + 6);
__m128 _val7 = _mm_load1_ps(r0 + 7);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
_mm_store_ps(output0_tm + 4 * 2, _sum2);
_mm_store_ps(output0_tm + 4 * 3, _sum3);
_mm_store_ps(output0_tm + 4 * 4, _sum4);
_mm_store_ps(output0_tm + 4 * 5, _sum5);
_mm_store_ps(output0_tm + 4 * 6, _sum6);
_mm_store_ps(output0_tm + 4 * 7, _sum7);
output0_tm += 4 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _val3 = _mm_load1_ps(r0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
_mm_store_ps(output0_tm + 4 * 2, _sum2);
_mm_store_ps(output0_tm + 4 * 3, _sum3);
output0_tm += 4 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
__m128 _val1 = _mm_load1_ps(r0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum0);
_mm_store_ps(output0_tm + 4, _sum1);
output0_tm += 4 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 4; // inch always > 0
__m128 _sum = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _w0 = _mm_load_ps(k0);
__m128 _val0 = _mm_load1_ps(r0);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
r0 += 1;
k0 += 4;
}
_mm_store_ps(output0_tm, _sum);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_setzero_ps();
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[4][6][4];
__m128 _v2 = _mm_set1_ps(2.f);
__m128 _v4 = _mm_set1_ps(4.f);
__m128 _v8 = _mm_set1_ps(8.f);
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
float* output0 = out0.row<float>(i * 4) + (j * 4) * 4;
// TODO msa optimize
for (int m = 0; m < 6; m++)
{
__m128 _out0tm0 = _mm_load_ps(output0_tm_0);
__m128 _out0tm1 = _mm_load_ps(output0_tm_1);
__m128 _out0tm2 = _mm_load_ps(output0_tm_2);
__m128 _out0tm3 = _mm_load_ps(output0_tm_3);
__m128 _out0tm4 = _mm_load_ps(output0_tm_4);
__m128 _out0tm5 = _mm_load_ps(output0_tm_5);
__m128 _tmp02a = _mm_add_ps(_out0tm1, _out0tm2);
__m128 _tmp13a = _mm_sub_ps(_out0tm1, _out0tm2);
__m128 _tmp02b = _mm_add_ps(_out0tm3, _out0tm4);
__m128 _tmp13b = _mm_sub_ps(_out0tm3, _out0tm4);
__m128 _tmp0m = _mm_add_ps(_mm_add_ps(_out0tm0, _tmp02a), _tmp02b);
__m128 _tmp1m = _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a);
__m128 _tmp2m = _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a);
__m128 _tmp3m = _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_out0tm5, _tmp13a));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
_mm_store_ps(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 4 * 6;
output0_tm_1 += tiles * 4 * 6;
output0_tm_2 += tiles * 4 * 6;
output0_tm_3 += tiles * 4 * 6;
output0_tm_4 += tiles * 4 * 6;
output0_tm_5 += tiles * 4 * 6;
}
for (int m = 0; m < 4; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp02a = _mm_add_ps(_tmp01, _tmp02);
__m128 _tmp13a = _mm_sub_ps(_tmp01, _tmp02);
__m128 _tmp02b = _mm_add_ps(_tmp03, _tmp04);
__m128 _tmp13b = _mm_sub_ps(_tmp03, _tmp04);
__m128 _out00 = _mm_add_ps(_bias0, _mm_add_ps(_mm_add_ps(_tmp00, _tmp02a), _tmp02b));
__m128 _out01 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v2, _tmp13b, _tmp13a));
__m128 _out02 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v4, _tmp02b, _tmp02a));
__m128 _out03 = _mm_add_ps(_bias0, _mm_comp_fmadd_ps(_v8, _tmp13b, _mm_add_ps(_tmp05, _tmp13a)));
_mm_store_ps(output0, _out00);
_mm_store_ps(output0 + 4, _out01);
_mm_store_ps(output0 + 4 * 2, _out02);
_mm_store_ps(output0 + 4 * 3, _out03);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
critical.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define ARRAY_SIZE 512
uint32_t a[ARRAY_SIZE] = {0};
uint32_t b[ARRAY_SIZE] = {0};
uint32_t c[ARRAY_SIZE] = {0};
/*
 * Cluster main entry, executed by core 0.
 * Forks an OpenMP team that: (1) fills a[] and b[] with a work-shared
 * loop, (2) sums them into c[] with a second work-shared loop, and
 * (3) lets every thread walk c[] inside a critical section.
 */
void cluster_delegate(void *arg)
{
printf("Cluster master core entry\n");
#pragma omp parallel
{
printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
/* Work-shared initialization of the two input arrays. */
#pragma omp for
for (int i=0; i<ARRAY_SIZE; i++)
{
a[i] = 2 * i;
b[i] = 3 * i;
}
/* Busy-wait scaled by thread id, deliberately de-synchronizing the
 * threads before the explicit barrier below. volatile keeps the
 * compiler from optimizing the empty loop away. */
for(volatile int i = 0; i < (10000 << omp_get_thread_num()); i++);
#pragma omp barrier
#pragma omp for
for (int i=0; i<ARRAY_SIZE; i++)
{
c[i] = a[i] + b[i];
printf("[%d %d] c[%d]: %d\n", pi_cluster_id(), omp_get_thread_num(), i, c[i]);
}
#pragma omp barrier
/* Each thread runs this once, serialized. Because c[i] += i mutates
 * shared data inside the critical section, the sum each thread prints
 * depends on how many threads went before it. */
#pragma omp critical
{
uint32_t sum = 0;
for (int i=0; i<ARRAY_SIZE; i++)
{
sum += c[i];
c[i] += i;
}
printf("Core sum %d: %d\n", pi_core_id(), sum);
}
}
printf("Cluster master core exit\n");
}
/*
 * Fabric-controller entry: opens the cluster, dispatches
 * cluster_delegate as a cluster task, then closes the cluster and
 * terminates via pmsis_exit. `errors` stays 0 on the success path.
 */
void helloworld(void)
{
printf("Entering main controller\n");
uint32_t errors = 0;
uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
printf("[%d %d] Hello World!\n", cluster_id, core_id);
struct pi_device cluster_dev;
struct pi_cluster_conf cl_conf;
/* Init cluster configuration structure. */
pi_cluster_conf_init(&cl_conf);
cl_conf.id = 0; /* Set cluster ID. */
/* Configure & open cluster. */
pi_open_from_conf(&cluster_dev, &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\n");
pmsis_exit(-1);
}
/* Prepare cluster task and send it to cluster (blocking call:
 * returns after cluster_delegate has finished). */
struct pi_cluster_task cl_task;
pi_cluster_send_task_to_cl(&cluster_dev, pi_cluster_task(&cl_task, cluster_delegate, NULL));
pi_cluster_close(&cluster_dev);
printf("Test success !\n");
pmsis_exit(errors);
}
/* Program entry: hand control to the PMSIS runtime, which schedules
 * helloworld on the fabric controller. */
int main(void)
{
printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
int status = pmsis_kickoff((void *) helloworld);
return status;
}
|
BF_std.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2008,2010,2011,2013 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* A public domain version of this code, with reentrant and crypt(3)
* interfaces added, but optimizations specific to password cracking
* removed, is available at:
*
* http://www.openwall.com/crypt/
*
* This implementation is compatible with OpenBSD bcrypt.c (version 2a)
* by Niels Provos <provos at citi.umich.edu>, and uses some of his
* ideas. The password hashing algorithm was designed by David Mazieres
* <dm at lcs.mit.edu>.
*
* There's a paper on the algorithm that explains its design decisions:
*
* http://www.usenix.org/events/usenix99/provos.html
*
* Some of the tricks in BF_ROUND might be inspired by Eric Young's
* Blowfish library (I can't be sure if I would think of something if I
* hadn't seen his code).
*/
#include <stdlib.h>
#include <string.h>
#include "arch.h"
#include "common.h"
#include "BF_std.h"
#include "memdbg.h"
BF_binary BF_out[BF_N];
/* Number of Blowfish rounds, this is also hardcoded into a few places */
#define BF_ROUNDS 16
typedef BF_word BF_key[BF_ROUNDS + 2];
struct BF_ctx {
BF_word S[4][0x100];
BF_key P;
};
#if BF_N > 1
#define INDICES [BF_N]
#define INDEX [index]
#define INDEX0 [index]
#define for_each_index() \
for (index = 0; index < BF_N; index++)
#else
#define INDICES
#define INDEX
#define INDEX0 [0]
#define for_each_index()
#endif
#if BF_X2 == 3
#if BF_mt > 1
#define INDEX2 [lindex]
#else
#define INDEX2 [index]
#endif
#elif BF_X2
#if BF_mt > 1
#define INDEX2 [index & 1]
#else
#define INDEX2 [index]
#endif
#else
#define INDEX2
#endif
#if BF_mt > 1
#if BF_X2 == 3
#define for_each_t() \
for (t = 0; t < n; t += 3)
#define for_each_ti() \
for (index = t, lindex = 0; lindex < 3; index++, lindex++)
#elif BF_X2
#define for_each_t() \
for (t = 0; t < n; t += 2)
#define for_each_ti() \
for (index = t; index <= t + 1; index++)
#else
#define for_each_t() \
for (t = 0; t < n; t++)
#define for_each_ti() \
index = t;
#endif
#else
#define for_each_t()
#define for_each_ti() \
for_each_index()
#endif
#if BF_mt == 1
/* Current Blowfish context */
#if BF_ASM
extern
#else
static
#endif
struct BF_ctx CC_CACHE_ALIGN BF_current INDICES;
#endif
/* Current Blowfish key */
static BF_key CC_CACHE_ALIGN BF_exp_key INDICES;
#if defined(__linux__) && defined(__sparc__)
static BF_key BF_init_key INDICES;
#else
static BF_key CC_CACHE_ALIGN BF_init_key INDICES;
#endif
/*
* Magic IV for 64 Blowfish encryptions that we do at the end.
* The string is "OrpheanBeholderScryDoubt" on big-endian.
*/
static BF_word BF_magic_w[6] = {
0x4F727068, 0x65616E42, 0x65686F6C,
0x64657253, 0x63727944, 0x6F756274
};
/*
* P-box and S-box tables initialized with digits of Pi.
*/
static struct BF_ctx BF_init_state = {
{
{
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a
}, {
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7
}, {
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0
}, {
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6
}
}, {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
0x9216d5d9, 0x8979fb1b
}
};
/*
* Same charset, different order -- can't use the common.c table here.
*/
/*
 * Decode table for bcrypt's base-64 alphabet "./A-Za-z0-9" (ASCII only).
 * Maps an ASCII code (0x00..0x7F) to its 6-bit value; 64 marks characters
 * that are not part of the alphabet.
 */
unsigned char BF_atoi64[0x80] = {
	64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, /* 0x00-0x0F */
	64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, /* 0x10-0x1F */
	64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 0, 1,   /* ' '..'/': only '.' and '/' map */
	54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 64, 64, 64, 64, 64, /* '0'-'9' */
	64, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,         /* '@', 'A'-'O' */
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 64, 64, 64, 64, 64, /* 'P'-'Z' */
	64, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, /* '`', 'a'-'o' */
	43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 64, 64, 64, 64, 64  /* 'p'-'z' */
};
#if ARCH_LITTLE_ENDIAN
/*
 * Byte-swap 'count' 32-bit words in place (count must be >= 1), converting
 * between the big-endian byte order the bcrypt algorithm uses and the
 * little-endian host layout.
 */
static void BF_swap(BF_word *x, int count)
{
	BF_word tmp;

	do {
		tmp = *x;
		/* rotate by 16 bits, then swap bytes within each halfword */
		tmp = (tmp << 16) | (tmp >> 16);
		*x++ = ((tmp & 0x00FF00FF) << 8) | ((tmp >> 8) & 0x00FF00FF);
	} while (--count);
}
#else
/* Big-endian host: data is already in the required order; swap is a no-op */
#define BF_swap(x, count)
#endif
#if BF_SCALE
/* Architectures that can shift addresses left by 2 bits with no extra cost */
/*
 * One Blowfish Feistel round: R ^= P[N + 1] ^ F(L), where
 * F(L) = ((S[0][a] + S[1][b]) ^ S[2][c]) + S[3][d] for the four bytes
 * a..d of L (a = high byte).  The S-boxes are indexed directly; the
 * compiler folds the implicit *4 scaling into the addressing mode.
 * tmp1..tmp4 must be BF_word temporaries in scope at the expansion site.
 */
#define BF_ROUND(ctx, L, R, N, tmp1, tmp2, tmp3, tmp4) \
	tmp1 = L & 0xFF; \
	tmp2 = L >> 8; \
	tmp2 &= 0xFF; \
	tmp3 = L >> 16; \
	tmp3 &= 0xFF; \
	tmp4 = L >> 24; \
	tmp1 = ctx.S[3][tmp1]; \
	tmp2 = ctx.S[2][tmp2]; \
	tmp3 = ctx.S[1][tmp3]; \
	tmp3 += ctx.S[0][tmp4]; \
	tmp3 ^= tmp2; \
	R ^= ctx.P[N + 1]; \
	tmp3 += tmp1; \
	R ^= tmp3;
#else
/* Architectures with no complicated addressing modes supported */
/*
 * Same round, but each byte index is pre-scaled by 4 (the 0x3FC masks
 * keep 8 bits shifted left by 2) and the S-boxes are indexed as raw
 * byte offsets through BF_INDEX.
 */
#define BF_INDEX(S, i) \
	(*((BF_word *)(((unsigned char *)S) + (i))))
#define BF_ROUND(ctx, L, R, N, tmp1, tmp2, tmp3, tmp4) \
	tmp1 = L & 0xFF; \
	tmp1 <<= 2; \
	tmp2 = L >> 6; \
	tmp2 &= 0x3FC; \
	tmp3 = L >> 14; \
	tmp3 &= 0x3FC; \
	tmp4 = L >> 22; \
	tmp4 &= 0x3FC; \
	tmp1 = BF_INDEX(ctx.S[3], tmp1); \
	tmp2 = BF_INDEX(ctx.S[2], tmp2); \
	tmp3 = BF_INDEX(ctx.S[1], tmp3); \
	tmp3 += BF_INDEX(ctx.S[0], tmp4); \
	tmp3 ^= tmp2; \
	R ^= ctx.P[N + 1]; \
	tmp3 += tmp1; \
	R ^= tmp3;
#endif
/*
 * Encrypt one block, BF_ROUNDS is hardcoded here.
 */
/*
 * 16 unrolled Feistel rounds; the L/R arguments alternate so no swap is
 * needed per round.  The tail undoes the final swap and whitens with
 * P[BF_ROUNDS] / P[BF_ROUNDS + 1].  u1..u4 must be BF_word temporaries
 * in scope at the expansion site.
 */
#define BF_ENCRYPT(ctx, L, R) \
	L ^= ctx.P[0]; \
	BF_ROUND(ctx, L, R, 0, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 1, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 2, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 3, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 4, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 5, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 6, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 7, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 8, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 9, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 10, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 11, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 12, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 13, u1, u2, u3, u4); \
	BF_ROUND(ctx, L, R, 14, u1, u2, u3, u4); \
	BF_ROUND(ctx, R, L, 15, u1, u2, u3, u4); \
	u4 = R; \
	R = L; \
	L = u4 ^ ctx.P[BF_ROUNDS + 1];
#if BF_ASM
/* The main self-encryption loop is provided by hand-written assembly */
extern void (*BF_body)(void);
#else
#if BF_X2 == 3
/*
 * Encrypt three blocks in parallel. BF_ROUNDS is hardcoded here.
 */
/* Same structure as BF_ENCRYPT, interleaved across three independent
 * contexts (BF_current[0..2]) to hide S-box load latencies. */
#define BF_ENCRYPT2 \
	L0 ^= BF_current[0].P[0]; \
	L1 ^= BF_current[1].P[0]; \
	L2 ^= BF_current[2].P[0]; \
	BF_ROUND(BF_current[0], L0, R0, 0, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 0, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 0, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 1, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 1, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 1, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 2, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 2, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 2, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 3, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 3, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 3, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 4, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 4, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 4, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 5, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 5, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 5, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 6, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 6, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 6, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 7, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 7, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 7, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 8, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 8, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 8, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 9, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 9, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 9, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 10, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 10, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 10, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 11, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 11, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 11, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 12, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 12, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 12, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 13, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 13, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 13, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], L0, R0, 14, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 14, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], L2, R2, 14, w1, w2, w3, w4); \
	BF_ROUND(BF_current[0], R0, L0, 15, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 15, v1, v2, v3, v4); \
	BF_ROUND(BF_current[2], R2, L2, 15, w1, w2, w3, w4); \
	u4 = R0; \
	v4 = R1; \
	w4 = R2; \
	R0 = L0; \
	R1 = L1; \
	R2 = L2; \
	L0 = u4 ^ BF_current[0].P[BF_ROUNDS + 1]; \
	L1 = v4 ^ BF_current[1].P[BF_ROUNDS + 1]; \
	L2 = w4 ^ BF_current[2].P[BF_ROUNDS + 1];
/* Rewrite the P arrays and then all four S-boxes of the three contexts
 * by repeated self-encryption (the standard Blowfish key schedule body);
 * results for contexts 1 and 2 are stored via pointer offsets relative
 * to context 0. */
#define BF_body() \
	L0 = R0 = L1 = R1 = L2 = R2 = 0; \
	ptr = BF_current[0].P; \
	do { \
		BF_ENCRYPT2; \
		*ptr = L0; \
		*(ptr + 1) = R0; \
		*(ptr + (BF_current[1].P - BF_current[0].P)) = L1; \
		*(ptr + (BF_current[1].P - BF_current[0].P) + 1) = R1; \
		*(ptr + (BF_current[2].P - BF_current[0].P)) = L2; \
		*(ptr + (BF_current[2].P - BF_current[0].P) + 1) = R2; \
		ptr += 2; \
	} while (ptr < &BF_current[0].P[BF_ROUNDS + 2]); \
\
	ptr = BF_current[0].S[0]; \
	do { \
		ptr += 2; \
		BF_ENCRYPT2; \
		*(ptr - 2) = L0; \
		*(ptr - 1) = R0; \
		*(ptr - 2 + (BF_current[1].S[0] - BF_current[0].S[0])) = L1; \
		*(ptr - 1 + (BF_current[1].S[0] - BF_current[0].S[0])) = R1; \
		*(ptr - 2 + (BF_current[2].S[0] - BF_current[0].S[0])) = L2; \
		*(ptr - 1 + (BF_current[2].S[0] - BF_current[0].S[0])) = R2; \
	} while (ptr < &BF_current[0].S[3][0xFF]);
#elif BF_X2
/*
 * Encrypt two blocks in parallel. BF_ROUNDS is hardcoded here.
 */
/* Two-way interleaved variant of BF_ENCRYPT (contexts BF_current[0..1]) */
#define BF_ENCRYPT2 \
	L0 ^= BF_current[0].P[0]; \
	L1 ^= BF_current[1].P[0]; \
	BF_ROUND(BF_current[0], L0, R0, 0, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 0, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 1, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 1, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 2, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 2, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 3, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 3, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 4, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 4, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 5, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 5, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 6, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 6, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 7, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 7, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 8, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 8, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 9, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 9, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 10, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 10, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 11, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 11, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 12, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 12, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 13, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 13, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], L0, R0, 14, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], L1, R1, 14, v1, v2, v3, v4); \
	BF_ROUND(BF_current[0], R0, L0, 15, u1, u2, u3, u4); \
	BF_ROUND(BF_current[1], R1, L1, 15, v1, v2, v3, v4); \
	u4 = R0; \
	v4 = R1; \
	R0 = L0; \
	R1 = L1; \
	L0 = u4 ^ BF_current[0].P[BF_ROUNDS + 1]; \
	L1 = v4 ^ BF_current[1].P[BF_ROUNDS + 1];
/* Two-context key-schedule body; see the three-context variant above */
#define BF_body() \
	L0 = R0 = L1 = R1 = 0; \
	ptr = BF_current[0].P; \
	do { \
		BF_ENCRYPT2; \
		*ptr = L0; \
		*(ptr + 1) = R0; \
		*(ptr + (BF_current[1].P - BF_current[0].P)) = L1; \
		*(ptr + (BF_current[1].P - BF_current[0].P) + 1) = R1; \
		ptr += 2; \
	} while (ptr < &BF_current[0].P[BF_ROUNDS + 2]); \
\
	ptr = BF_current[0].S[0]; \
	do { \
		ptr += 2; \
		BF_ENCRYPT2; \
		*(ptr - 2) = L0; \
		*(ptr - 1) = R0; \
		*(ptr - 2 + (BF_current[1].S[0] - BF_current[0].S[0])) = L1; \
		*(ptr - 1 + (BF_current[1].S[0] - BF_current[0].S[0])) = R1; \
	} while (ptr < &BF_current[0].S[3][0xFF]);
#else
/* Single-context key-schedule body: encrypt the all-zero block repeatedly,
 * overwriting P and then all four S-boxes with the results */
#define BF_body() \
	L0 = R0 = 0; \
	ptr = BF_current.P; \
	do { \
		BF_ENCRYPT(BF_current, L0, R0); \
		*ptr = L0; \
		*(ptr + 1) = R0; \
		ptr += 2; \
	} while (ptr < &BF_current.P[BF_ROUNDS + 2]); \
\
	ptr = BF_current.S[0]; \
	do { \
		ptr += 2; \
		BF_ENCRYPT(BF_current, L0, R0); \
		*(ptr - 2) = L0; \
		*(ptr - 1) = R0; \
	} while (ptr < &BF_current.S[3][0xFF]);
#endif
#endif
/*
 * Expand one candidate key into BF_exp_key / BF_init_key for slot "index"
 * (the INDEX macro, defined earlier in this file, selects the slot).
 * The key is treated as cyclic, including its NUL terminator.  When
 * sign_extension_bug is set, bytes are sign-extended before being OR'd
 * in, deliberately reproducing the historical crypt_blowfish "$2x$"
 * sign-extension bug for compatibility.
 */
void BF_std_set_key(char *key, int index, int sign_extension_bug)
{
	char *ptr = key;
	int i, j;
	BF_word tmp;

	for (i = 0; i < BF_ROUNDS + 2; i++) {
		tmp = 0;
		for (j = 0; j < 4; j++) {
			tmp <<= 8;
			if (sign_extension_bug)
				tmp |= (int)(signed char)*ptr; /* buggy: smears sign bits */
			else
				tmp |= (unsigned char)*ptr;

			/* wrap around at the NUL, which is itself included */
			if (!*ptr) ptr = key; else ptr++;
		}

		BF_exp_key INDEX[i] = tmp;
		BF_init_key INDEX[i] = BF_init_state.P[i] ^ tmp;
	}
}
/*
 * Run the costly bcrypt ("eksblowfish") setup and final encryption for the
 * candidate keys previously loaded with BF_std_set_key(), writing the
 * results to BF_out.  The for_each_t()/for_each_ti() and INDEX* macros
 * (defined earlier in this file) iterate over OpenMP threads and over the
 * 1..3 interleaved Blowfish contexts selected by BF_X2.
 */
void BF_std_crypt(BF_salt *salt, int n)
{
#if BF_mt > 1
	int t;
#endif

#if BF_mt > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(t) shared(n, BF_init_state, BF_init_key, BF_exp_key, salt, BF_magic_w, BF_out)
#endif
	for_each_t() {
#if BF_mt > 1
#if BF_X2 == 3
	struct BF_ctx BF_current[3];
#elif BF_X2
	struct BF_ctx BF_current[2];
#else
	struct BF_ctx BF_current;
#endif
#endif
	BF_word L0, R0;
	BF_word u1, u2, u3, u4;
#if BF_X2
	BF_word L1, R1;
	BF_word v1, v2, v3, v4;
#if BF_X2 == 3
	BF_word L2, R2;
	BF_word w1, w2, w3, w4;
#endif
#endif
	BF_word *ptr;
	BF_word count;
#if BF_N > 1
	int index;
#endif
#if BF_X2 == 3 && BF_mt > 1
	int lindex;
#endif

	/* Initial key schedule: start from the key-XOR'd P array and the
	 * standard S-boxes, folding the salt into every encryption */
	for_each_ti() {
		int i;

		memcpy(BF_current INDEX2.S,
		    BF_init_state.S, sizeof(BF_current INDEX2.S));
		memcpy(BF_current INDEX2.P,
		    BF_init_key INDEX, sizeof(BF_current INDEX2.P));

		L0 = R0 = 0;
		for (i = 0; i < BF_ROUNDS + 2; i += 2) {
			/* i & 2 cycles 0,2,0,2,... through the 4 salt words */
			L0 ^= salt->salt[i & 2];
			R0 ^= salt->salt[(i & 2) + 1];
			BF_ENCRYPT(BF_current INDEX2, L0, R0);
			BF_current INDEX2.P[i] = L0;
			BF_current INDEX2.P[i + 1] = R0;
		}

		ptr = BF_current INDEX2.S[0];
		do {
			ptr += 4;
			L0 ^= salt->salt[(BF_ROUNDS + 2) & 3];
			R0 ^= salt->salt[(BF_ROUNDS + 3) & 3];
			BF_ENCRYPT(BF_current INDEX2, L0, R0);
			*(ptr - 4) = L0;
			*(ptr - 3) = R0;
			L0 ^= salt->salt[(BF_ROUNDS + 4) & 3];
			R0 ^= salt->salt[(BF_ROUNDS + 5) & 3];
			BF_ENCRYPT(BF_current INDEX2, L0, R0);
			*(ptr - 2) = L0;
			*(ptr - 1) = R0;
		} while (ptr < &BF_current INDEX2.S[3][0xFF]);
	}

	/* Variable-cost loop: 2^rounds iterations of key schedule followed
	 * by salt schedule.
	 * NOTE(review): "1 << salt->rounds" is a signed int shift; it is
	 * undefined behavior if rounds can reach 31 -- confirm the valid()
	 * check bounds rounds below 31, or shift 1U instead. */
	count = 1 << salt->rounds;
	do {
		for_each_ti() {
			/* XOR the expanded key into P (key schedule pass) */
			BF_current INDEX2.P[0] ^= BF_exp_key INDEX[0];
			BF_current INDEX2.P[1] ^= BF_exp_key INDEX[1];
			BF_current INDEX2.P[2] ^= BF_exp_key INDEX[2];
			BF_current INDEX2.P[3] ^= BF_exp_key INDEX[3];
			BF_current INDEX2.P[4] ^= BF_exp_key INDEX[4];
			BF_current INDEX2.P[5] ^= BF_exp_key INDEX[5];
			BF_current INDEX2.P[6] ^= BF_exp_key INDEX[6];
			BF_current INDEX2.P[7] ^= BF_exp_key INDEX[7];
			BF_current INDEX2.P[8] ^= BF_exp_key INDEX[8];
			BF_current INDEX2.P[9] ^= BF_exp_key INDEX[9];
			BF_current INDEX2.P[10] ^= BF_exp_key INDEX[10];
			BF_current INDEX2.P[11] ^= BF_exp_key INDEX[11];
			BF_current INDEX2.P[12] ^= BF_exp_key INDEX[12];
			BF_current INDEX2.P[13] ^= BF_exp_key INDEX[13];
			BF_current INDEX2.P[14] ^= BF_exp_key INDEX[14];
			BF_current INDEX2.P[15] ^= BF_exp_key INDEX[15];
			BF_current INDEX2.P[16] ^= BF_exp_key INDEX[16];
			BF_current INDEX2.P[17] ^= BF_exp_key INDEX[17];
		}
		BF_body();

		/* XOR the salt into P (salt schedule pass) */
		u1 = salt->salt[0];
		u2 = salt->salt[1];
		u3 = salt->salt[2];
		u4 = salt->salt[3];
		for_each_ti() {
			BF_current INDEX2.P[0] ^= u1;
			BF_current INDEX2.P[1] ^= u2;
			BF_current INDEX2.P[2] ^= u3;
			BF_current INDEX2.P[3] ^= u4;
			BF_current INDEX2.P[4] ^= u1;
			BF_current INDEX2.P[5] ^= u2;
			BF_current INDEX2.P[6] ^= u3;
			BF_current INDEX2.P[7] ^= u4;
			BF_current INDEX2.P[8] ^= u1;
			BF_current INDEX2.P[9] ^= u2;
			BF_current INDEX2.P[10] ^= u3;
			BF_current INDEX2.P[11] ^= u4;
			BF_current INDEX2.P[12] ^= u1;
			BF_current INDEX2.P[13] ^= u2;
			BF_current INDEX2.P[14] ^= u3;
			BF_current INDEX2.P[15] ^= u4;
			BF_current INDEX2.P[16] ^= u1;
			BF_current INDEX2.P[17] ^= u2;
		}
		BF_body();
	} while (--count);

#if BF_mt == 1
	/* Fast path: only the first two output words are produced here;
	 * BF_std_crypt_exact() computes the rest when a full compare is
	 * needed */
	for_each_ti() {
		L0 = BF_magic_w[0];
		R0 = BF_magic_w[1];
		count = 64;
		do {
			BF_ENCRYPT(BF_current INDEX, L0, R0);
		} while (--count);
		BF_out INDEX0[0] = L0;
		BF_out INDEX0[1] = R0;
	}
#else
	/* Full output: encrypt the 3-block magic string 64 times in ECB */
	for_each_ti() {
		BF_word L, R;
		BF_word u1, u2, u3, u4;
		BF_word count;
		int i;

		memcpy(&BF_out[index], &BF_magic_w,
		    sizeof(BF_out[index]));
		count = 64;
		do
		for (i = 0; i < 6; i += 2) {
			L = BF_out[index][i];
			R = BF_out[index][i + 1];
			BF_ENCRYPT(BF_current INDEX2, L, R);
			BF_out[index][i] = L;
			BF_out[index][i + 1] = R;
		} while (--count);
/* This has to be bug-compatible with the original implementation :-) */
		BF_out[index][5] &= ~(BF_word)0xFF;
	}
#endif
	}
}
#if BF_mt == 1
/*
 * Complete the output for one candidate: BF_std_crypt() in single-thread
 * mode only produced words 0..1 of BF_out[index]; this fills in words
 * 2..5 by encrypting the rest of the magic string 64 times.
 */
void BF_std_crypt_exact(int index)
{
	BF_word L, R;
	BF_word u1, u2, u3, u4;
	BF_word count;
	int i;

	memcpy(&BF_out[index][2], &BF_magic_w[2], sizeof(BF_word) * 4);

	count = 64;
	do
	for (i = 2; i < 6; i += 2) {
		L = BF_out[index][i];
		R = BF_out[index][i + 1];
		BF_ENCRYPT(BF_current INDEX, L, R);
		BF_out[index][i] = L;
		BF_out[index][i + 1] = R;
	} while (--count);

/* This has to be bug-compatible with the original implementation :-) */
	BF_out[index][5] &= ~(BF_word)0xFF;
}
#endif
/*
* I'm not doing any error checking in the routines below since the
* ciphertext should have already been checked to be fmt_BF.valid().
*/
/*
 * Decode "size" bytes of bcrypt base-64 from src into dst.  No input
 * validation is done here: per the note above, the ciphertext must
 * already have passed fmt_BF.valid().  Each group of four input
 * characters yields three output bytes; the final group may be short.
 */
static void BF_decode(BF_word *dst, char *src, int size)
{
	unsigned char *dptr = (unsigned char *)dst;
	unsigned char *end = dptr + size;
	unsigned char *sptr = (unsigned char *)src;
	unsigned int c1, c2, c3, c4;

	do {
		c1 = BF_atoi64[ARCH_INDEX(*sptr++)];
		c2 = BF_atoi64[ARCH_INDEX(*sptr++)];
		*dptr++ = (c1 << 2) | ((c2 & 0x30) >> 4);
		if (dptr >= end) break;

		c3 = BF_atoi64[ARCH_INDEX(*sptr++)];
		*dptr++ = ((c2 & 0x0F) << 4) | ((c3 & 0x3C) >> 2);
		if (dptr >= end) break;

		c4 = BF_atoi64[ARCH_INDEX(*sptr++)];
		*dptr++ = ((c3 & 0x03) << 6) | c4;
	} while (dptr < end);
}
/*
 * Parse the salt portion of a bcrypt ciphertext ("$2?$NN$<22 chars>...")
 * into a static BF_salt.  The 128-bit salt starts at offset 7, the cost
 * at offset 4, and the subtype character at offset 2; "$2a$" hashes are
 * normalized to subtype 'y'.  Returns a pointer to static storage that
 * is overwritten on the next call.
 */
void *BF_std_get_salt(char *ciphertext)
{
	static BF_salt salt;
	char subtype = ciphertext[2];

	memset(&salt, 0, sizeof(salt));
	BF_decode(salt.salt, &ciphertext[7], 16);
	BF_swap(salt.salt, 4);
	salt.rounds = atoi(&ciphertext[4]);
	salt.subtype = (subtype == 'a') ? 'y' : subtype;

	return &salt;
}
#if FMT_MAIN_VERSION > 11
/* For BF, the tunable cost parameter is the iteration count */
/*
 * Return the iteration count (2^rounds) encoded in the salt.
 *
 * The shift operand is unsigned: "1 << rounds" on a signed int is
 * undefined behavior once rounds reaches 31 (CERT INT34-C), and 31 is a
 * representable bcrypt cost setting.
 */
unsigned int BF_iteration_count(void *salt)
{
	BF_salt *bf_salt;
	bf_salt = (BF_salt *) salt;
	return 1U << bf_salt->rounds;
}
#endif
/*
 * Decode the 23-byte (184-bit) hash portion of a bcrypt ciphertext,
 * starting at offset 29, into the static binary buffer.  23 bytes only
 * partially define the sixth 32-bit word, so binary[5] is zeroed first
 * and its low byte masked off afterwards -- matching the masking that
 * BF_std_crypt() applies to its own output.  Returns static storage.
 */
void *BF_std_get_binary(char *ciphertext)
{
	static BF_binary binary;

	binary[5] = 0;
	BF_decode(binary, &ciphertext[29], 23);
	BF_swap(binary, 6);
	/* only 184 of the 192 bits are encoded; drop the undefined byte */
	binary[5] &= ~(BF_word)0xFF;

	return &binary;
}
|
GB_binop__pow_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint32)
// C=scalar+B GB (_bind1st__pow_uint32)
// C=scalar+B' GB (_bind1st_tran__pow_uint32)
// C=A+scalar GB (_bind2nd__pow_uint32)
// C=A'+scalar GB (_bind2nd_tran__pow_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_pow_uint32 (aij, bij)
// Operator/type configuration macros consumed by the included templates.
// (Auto-generated; kept verbatim apart from review notes.)
#define GB_ATYPE \
    uint32_t
#define GB_BTYPE \
    uint32_t
#define GB_CTYPE \
    uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): the trailing backslash after "0" splices the next comment
// line into this macro; harmless today (line splicing happens before
// comment removal, so the macro still expands to 0), but fragile if a
// non-comment line is ever inserted below it.
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// NOTE(review): same stray trailing backslash as GB_A_IS_PATTERN above.
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_uint32 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_UINT32 || GxB_NO_POW_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: POW is not one of the accumulable operators, so no
// dense-ewise3-accum kernel is generated for it (compiled out via #if 0).
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The loop lives in the included template, driven by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, sliced by
// the precomputed B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of a dense matrix C.
GrB_Info GB (_Cdense_accumb__pow_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the code
    // generator; kept as-is since this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no colscale (C = A*D) kernel is generated for POW.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no rowscale (C = D*B) kernel is generated for POW.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B with optional mask M (structural or
// valued, possibly complemented).  For eWiseUnion, alpha/beta scalars
// substitute for entries missing from A/B respectively.
GrB_Info GB (_AaddB__pow_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    // the alpha/beta inputs are only dereferenced for eWiseUnion
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse; work is distributed via the precomputed TaskList.
GrB_Info GB (_AemultB_08__pow_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Since POW is non-commutative (GB_BINOP_FLIP is 1), the
// flipxy flag selects whether the template applies fmult(y,x) or
// fmult(x,y).
GrB_Info GB (_AemultB_02__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; iteration is over the mask's entries.
GrB_Info GB (_AemultB_04__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with C stored as bitmap: C = A.*B, C<M> = A.*B, or
// C<!M> = A.*B.
GrB_Info GB (_AemultB_bitmap__pow_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the operator with the scalar bound as the first argument:
// Cx [p] = pow (x, Bx [p]) for every entry present in B (per bitmap Bb).
GrB_Info GB (_bind1st__pow_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            uint32_t bval = GBX (Bx, k, false) ;
            Cx [k] = GB_pow_uint32 (x, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the operator with the scalar bound as the second argument:
// Cx [p] = pow (Ax [p], y) for every entry present in A (per bitmap Ab).
GrB_Info GB (_bind2nd__pow_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            uint32_t aval = GBX (Ax, k, false) ;
            Cx [k] = GB_pow_uint32 (aval, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_uint32 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound first.
GrB_Info GB (_bind1st_tran__pow_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_uint32 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y
// bound second.
GrB_Info GB (_bind2nd_tran__pow_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
core_dlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlauum.c, normal z -> d, Fri Sep 28 17:38:22 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_lauum
*
* Computes the product U * U^T or L^T * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular factor U or L.
* On exit, if uplo = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^T;
* if uplo = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^T * L.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[out] info
* - 0 on successful exit
* - < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
// Sequential kernel: delegates to LAPACKE (see the doxygen block above
// for the full contract).  Declared weak so a tuned implementation can
// override it at link time.
__attribute__((weak))
int plasma_core_dlauum(plasma_enum_t uplo,
                       int n,
                       double *A, int lda)
{
    int info = LAPACKE_dlauum_work(LAPACK_COL_MAJOR,
                                   lapack_const(uplo), n, A, lda);
    return info;
}
/******************************************************************************/
// Asynchronous OpenMP-task wrapper around plasma_core_dlauum(): the tile
// A (lda*n elements) is both read and overwritten, expressed via the
// inout dependence.
void plasma_core_omp_dlauum(plasma_enum_t uplo,
                            int n,
                            double *A, int lda,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // skip the work if an earlier task in this sequence already failed
        if (sequence->status == PlasmaSuccess) {
            int info = plasma_core_dlauum(uplo, n, A, lda);
            // per the contract above, nonzero (< 0) means an illegal
            // argument; report it as an internal error on the sequence
            if (info != PlasmaSuccess) {
                plasma_coreblas_error("core_dlauum() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
omp_bug5.c | /******************************************************************************
* FILE: omp_bug5.c
* DESCRIPTION:
* Using SECTIONS, two threads initialize their own array and then add
* it to the other's array, however a deadlock occurs.
* AUTHOR: Blaise Barney 01/29/04
* LAST REVISED: 08/15/11
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000000
#define PI 3.1415926535
#define DELTA .01415926535
/*
 * Fixed version of the SECTIONS deadlock demo.
 *
 * Original bug: section 1 acquired locka then lockb while section 2
 * acquired lockb then locka.  With each section holding its first lock
 * while waiting for the other's, the two threads deadlocked.  The fix
 * below never holds both locks at once: each section releases its first
 * lock before acquiring the second.  The array updates are independent
 * per-array additions, so the final results are unchanged.
 *
 * Also: a[] and b[] (2 x 4 MB) are made static so they no longer risk
 * overflowing the default thread stack.
 */
int main (int argc, char *argv[])
{
  int nthreads, tid, i;
  static float a[N], b[N];   /* static: too large for the stack */
  omp_lock_t locka, lockb;

  /* Initialize the locks */
  omp_init_lock(&locka);
  omp_init_lock(&lockb);

  /* Initialize the arrays */
  for (i=0; i<N; i++) {
    a[i]=0;
    b[i]=0;
  }

  /* Fork a team of threads giving them their own copies of variables */
  #pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
  {
    /* Obtain thread number and number of threads */
    tid = omp_get_thread_num();
    #pragma omp master
    {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
    printf("Thread %d starting...\n", tid);
    #pragma omp barrier

    #pragma omp sections nowait
    {
      #pragma omp section
      {
        /* hold only one lock at a time -- release locka before lockb */
        omp_set_lock(&locka);
        printf("Thread %d updating a[]\n",tid);
        for (i=0; i<N; i++)
          a[i] += DELTA * i;
        omp_unset_lock(&locka);

        omp_set_lock(&lockb);
        printf("Thread %d updating b[]\n",tid);
        for (i=0; i<N; i++)
          b[i] += DELTA + i;
        omp_unset_lock(&lockb);
      }

      #pragma omp section
      {
        /* hold only one lock at a time -- release lockb before locka */
        omp_set_lock(&lockb);
        printf("Thread %d updating b[]\n",tid);
        for (i=0; i<N; i++)
          b[i] += PI * i;
        omp_unset_lock(&lockb);

        omp_set_lock(&locka);
        printf("Thread %d adding b[] to a[]\n",tid);
        for (i=0; i<N; i++)
          a[i] += PI + i;
        omp_unset_lock(&locka);
      }
    } /* end of sections */
  } /* end of parallel region */

  printf("Sample results: %f %f %f %f\n",a[0],b[0],a[N-1],b[N-1]);

  omp_destroy_lock(&locka);
  omp_destroy_lock(&lockb);
  return 0;
}
|
packet-inl.h | /*!
* Copyright (c) 2014 by Contributors
* \file packet-inl.h
* \brief Generic packet vectorization code
*/
#ifndef MSHADOW_PACKET_INL_H_
#define MSHADOW_PACKET_INL_H_
#ifdef __APPLE__
#include <stdlib.h>
#else
#include <malloc.h>
#endif
#include "./base.h"
#include "./tensor.h"
#include "./expression.h"
namespace mshadow {
/*! \brief namespace of packet math*/
namespace packet {
/*! \brief available packet (SIMD) backends */
enum PacketArch {
  kPlain,  // scalar fallback, no SIMD
  kSSE2,   // SSE2 vectorized backend
};
#if MSHADOW_USE_SSE
#define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kSSE2
#else
#define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kPlain
#endif
// whether packet operator is enabled.
/*!
 * \brief Generic packet type
 * \tparam DType The data type of the packet.
 * \tparam Arch the Arch of the packet.
 */
template<typename DType, PacketArch Arch = MSHADOW_DEFAULT_PACKET>
struct Packet;

// Alignment policy for an architecture.  `value` is the log2 of the
// required alignment in bytes: it is used as a shift count below
// (1 << 4 = 16-byte alignment).
template<PacketArch Arch>
struct AlignBytes {
  static const index_t value = 4;
};
} // namespace packet
} // namespace mshadow
namespace mshadow {
namespace packet {
/*!
 * \brief analog to cudaMallocPitch, allocate an aligned space with num_line * lspace cells
 * \param out_pitch output parameter, the actual space allocated for each line
 * \param lspace number of cells required for each line
 * \param num_line number of lines to be allocated
 * \return aligned pointer; never NULL (allocation failure terminates via LOG(FATAL)/CHECK_EQ)
 */
inline void* AlignedMallocPitch(size_t *out_pitch,
                                size_t lspace,
                                size_t num_line) {
  // round each line up to a multiple of the packet alignment (1 << bits bytes)
  const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value;
  const index_t mask = (1 << bits) - 1;
  size_t pitch = ((lspace + mask) >> bits) << bits;
  *out_pitch = pitch;
#ifdef _MSC_VER
  void *res = _aligned_malloc(pitch * num_line, 1 << bits);
#else
  void *res;
  // posix_memalign returns nonzero on failure and leaves res indeterminate
  int ret = posix_memalign(&res, 1 << bits, pitch * num_line);
  CHECK_EQ(ret, 0) << "AlignedMallocPitch failed";
#endif
  if (res == NULL) {
    LOG(FATAL) << "AlignedMallocPitch failed";
  }
  return res;
}
/*!
 * \brief free aligned space obtained from AlignedMallocPitch
 * \param ptr pointer to space to be freed
 */
inline void AlignedFree(void *ptr) {
#ifdef _MSC_VER
  // memory from _aligned_malloc must be released with _aligned_free
  _aligned_free(ptr);
#else
  free(ptr);
#endif
}
/*! \brief check whether a size/offset is a multiple of the packet alignment */
template<PacketArch Arch>
inline bool CheckAlign(size_t pitch) {
  const index_t bits = AlignBytes<Arch>::value;
  // aligned iff the low `bits` bits are all zero
  return (pitch & ((1 << bits) - 1)) == 0;
}
/*! \brief check if a pointer is aligned */
template<PacketArch Arch>
inline bool CheckAlign(void *ptr) {
  // reuse the size overload on the pointer's integer value
  return CheckAlign<Arch>(reinterpret_cast<size_t>(ptr));
}
/*!
 * \brief get upper bound of aligned index of size
 * \param size size of the array (in elements of DType)
 * \tparam DType element type; its sizeof determines the byte length
 * \tparam Arch packet architecture whose alignment is honored
 * \return smallest element count >= size whose byte length is alignment-aligned
 */
template<typename DType, PacketArch Arch>
inline index_t UpperAlign(index_t size) {
  // FIX: use the alignment of the requested Arch.  The original hard-coded
  // AlignBytes<MSHADOW_DEFAULT_PACKET>, silently ignoring the Arch template
  // parameter (inconsistent with CheckAlign<Arch> above).
  const index_t bits = AlignBytes<Arch>::value;
  const index_t mask = (1 << bits) - 1;
  const index_t fsize = sizeof(DType);
  return (((size * fsize + mask) >> bits) << bits) / fsize;
}
/*!
 * \brief get lower bound of aligned index of size
 * \param size size of the array (in elements of DType)
 * \tparam DType element type; its sizeof determines the byte length
 * \tparam Arch packet architecture whose alignment is honored
 * \return largest element count <= size whose byte length is alignment-aligned
 */
template<typename DType, PacketArch Arch>
inline index_t LowerAlign(index_t size) {
  // FIX: use AlignBytes<Arch> instead of the hard-coded default packet,
  // so the Arch template parameter actually takes effect (consistent with
  // CheckAlign<Arch>).
  const index_t bits = AlignBytes<Arch>::value;
  const index_t fsize = sizeof(DType);
  return (((size * fsize) >> bits) << bits) / fsize;
}
/*!
 * \brief generic Packet operator
 * \tparam OP The operator
 * \tparam DType The data type
 * \tparam Arch The architecture.
 */
template<typename OP, typename DType, PacketArch Arch>
struct PacketOp {
  // fallback: no packetized implementation for this OP/DType/Arch combination
  static const bool kEnabled = false;
};
// specialization of operators
// packetized elementwise addition
template<typename DType, PacketArch Arch>
struct PacketOp<op::plus, DType, Arch> {
  static const bool kEnabled = true;
  MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
                                                 const Packet<DType, Arch>& rhs) {
    return lhs + rhs;
  }
};
// packetized elementwise subtraction
template<typename DType, PacketArch Arch>
struct PacketOp<op::minus, DType, Arch> {
  static const bool kEnabled = true;
  MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
                                                 const Packet<DType, Arch>& rhs) {
    return lhs - rhs;
  }
};
// packetized elementwise multiplication
template<typename DType, PacketArch Arch>
struct PacketOp<op::mul, DType, Arch> {
  static const bool kEnabled = true;
  MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
                                                 const Packet<DType, Arch>& rhs) {
    return lhs * rhs;
  }
};
// packetized elementwise division
template<typename DType, PacketArch Arch>
struct PacketOp<op::div, DType, Arch> {
  static const bool kEnabled = true;
  MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs,
                                                 const Packet<DType, Arch>& rhs) {
    return lhs / rhs;
  }
};
// packetized identity: pass the packet through unchanged
template<typename DType, PacketArch Arch>
struct PacketOp<op::identity, DType, Arch> {
  static const bool kEnabled = true;
  MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& src) {
    return src;
  }
};
// savers to do storage
// generic saver: read-modify-write — load the destination packet, combine
// it with src using SV's operator (e.g. +=), and store the result back
template<typename SV, typename TFloat, PacketArch Arch>
struct Saver{
  MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) {
    Packet<TFloat, Arch> lhs = Packet<TFloat, Arch>::Load(dst);
    Packet<TFloat, Arch> ans = PacketOp<typename SV::OPType, TFloat, Arch>::Map(lhs, src);
    ans.Store(dst);
  }
};
// saveto saver: plain assignment, never reads the old destination value
template<typename TFloat, PacketArch Arch>
struct Saver<sv::saveto, TFloat, Arch> {
  MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) {
    src.Store(dst);
  }
};
} // namespace packet
} // namespace mshadow
#include "packet/plain-inl.h"
#if MSHADOW_USE_SSE && !defined(__CUDACC__)
#include "packet/sse-inl.h"
#endif
namespace mshadow {
namespace expr {
typedef packet::PacketArch PacketArch;
// same as plan, but use packet
// Primary template: declares the interface every specialization provides.
template<typename ExpType, typename DType, PacketArch Arch>
class PacketPlan {
 public:
  /*!
   * \brief evaluate the expression at index [y][x],
   * x will be aligned to Packet<DType, Arch>::Size()
   */
  MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const;
  // scalar evaluation at [y][x], used for unaligned tail elements
  MSHADOW_CINLINE DType Eval(index_t y, index_t x) const;
};
// plan for a concrete tensor: packet/scalar loads from its data pointer
template <typename Device, int dim, typename DType, PacketArch Arch>
class PacketPlan<Tensor<Device, dim, DType>, DType, Arch> {
 public:
  explicit PacketPlan(const Tensor<Device, dim, DType> &t)
      :dptr_(t.dptr_), stride_(t.stride_) {}
  MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
    return packet::Packet<DType, Arch>::Load(&dptr_[y * stride_ + x]);
  }
  MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
    return dptr_[y * stride_ + x];
  }

 private:
  const DType *dptr_;  // raw element pointer of the tensor
  index_t stride_;     // row stride in elements
};
// plan for a scalar expression: broadcasts the scalar into a full packet
template<typename DType, PacketArch Arch>
class PacketPlan<ScalarExp<DType>, DType, Arch> {
 public:
  explicit PacketPlan(DType scalar) : scalar_(scalar) {}
  MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
    return packet::Packet<DType, Arch>::Fill(scalar_);
  }
  MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
    return scalar_;
  }

 private:
  DType scalar_;  // the broadcast value
};
// plan for a binary map expression: evaluates both operands and combines
// them with the packetized OP
template<typename OP, typename TA, typename TB, int etype, typename DType, PacketArch Arch>
class PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> {
 public:
  PacketPlan(const PacketPlan<TA, DType, Arch> &lhs, const PacketPlan<TB, DType, Arch> &rhs)
      : lhs_(lhs), rhs_(rhs) {}
  MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
    return packet::PacketOp<OP, DType, Arch>::Map(lhs_.EvalPacket(y, x), rhs_.EvalPacket(y, x));
  }
  MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
    return OP::Map(lhs_.Eval(y, x), rhs_.Eval(y, x));
  }

 private:
  PacketPlan<TA, DType, Arch> lhs_;  // plan for the left operand
  PacketPlan<TB, DType, Arch> rhs_;  // plan for the right operand
};
// plan for a unary map expression: forwards evaluation to the source plan
template<typename OP, typename TA, int etype, typename DType, PacketArch Arch>
class PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> {
 public:
  PacketPlan(const PacketPlan<TA, DType, Arch> &src) : src_(src) {}
  // FIX: the return type was packet::Packet<DType> (implicitly the default
  // arch), which mismatches PacketOp's Packet<DType, Arch> result whenever
  // Arch != MSHADOW_DEFAULT_PACKET.  Use the plan's own Arch, matching the
  // binary/tensor/scalar specializations above.
  MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const {
    return packet::PacketOp<OP, DType, Arch>::Map(src_.EvalPacket(y, x));
  }
  MSHADOW_CINLINE DType Eval(index_t y, index_t x) const {
    return OP::Map(src_.Eval(y, x));
  }

 private:
  PacketPlan<TA, DType, Arch> src_;  // plan for the wrapped sub-expression
};
template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype>
inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch>
MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e);
// build a PacketPlan for a scalar expression
template<PacketArch Arch, typename DType>
inline PacketPlan<ScalarExp<DType>, DType, Arch> MakePacketPlan(const ScalarExp<DType> &e) {
  return PacketPlan<ScalarExp<DType>, DType, Arch>(e.scalar_);
}
// build a PacketPlan for an r-value expression (e.g. a tensor)
template<PacketArch Arch, typename T, typename DType>
inline PacketPlan<T, DType, Arch> MakePacketPlan(const RValueExp<T, DType> &e) {
  return PacketPlan<T, DType, Arch>(e.self());
}
// build a PacketPlan for a cpu MakeTensorExp expression
template<PacketArch Arch, typename T, int dim, typename DType>
inline PacketPlan<T, DType, Arch>
MakePacketPlan(const MakeTensorExp<T, cpu, dim, DType> &e) {
  return PacketPlan<T, DType, Arch>(e.real_self());
}
// build a PacketPlan for a unary map expression (recurses into the source)
template<PacketArch Arch, typename OP, typename TA, typename DType, int etype>
inline PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>
MakePacketPlan(const UnaryMapExp<OP, TA, DType, etype> &e) {
  return PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.src_));
}
// build a PacketPlan for a binary map expression (recurses into both sides)
template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype>
inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch>
MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e) {
  return PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>,
                    DType, Arch>(MakePacketPlan<Arch>(e.lhs_), MakePacketPlan<Arch>(e.rhs_));
}
/*!
 * \brief static check packet enable
 *
 * \tparam E expression (or element) type being checked
 * \tparam Arch packet architecture
 */
// default: unknown expression/element types cannot be packetized
template<typename E, PacketArch Arch>
struct PacketCheck{
  static const bool kPass = false;
};
// float elements can be packetized
template<PacketArch Arch>
struct PacketCheck<float, Arch> {
  static const bool kPass = true;
};
// double elements can be packetized
template<PacketArch Arch>
struct PacketCheck<double, Arch> {
  static const bool kPass = true;
};
// a scalar expression passes iff its element type does
template<typename DType, PacketArch Arch>
struct PacketCheck<ScalarExp<DType>, Arch> {
  static const bool kPass = PacketCheck<DType, Arch>::kPass;
};
// a cpu tensor passes iff its element type does
template<int dim, typename DType, PacketArch Arch>
struct PacketCheck<Tensor<cpu, dim, DType>, Arch> {
  static const bool kPass = PacketCheck<DType, Arch>::kPass;
};
// a unary map passes iff its source passes and OP has a packet implementation
template<typename OP, typename TA, typename DType, int etype, PacketArch Arch>
struct PacketCheck<UnaryMapExp<OP, TA, DType, etype>, Arch> {
  static const bool kPass = PacketCheck<TA, Arch>::kPass &&
      packet::PacketOp<OP, DType, Arch>::kEnabled;
};
// a binary map passes iff both operands pass and OP has a packet implementation
template<typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch>
struct PacketCheck< BinaryMapExp<OP, TA, TB, DType, etype>, Arch> {
  static const bool kPass = packet::PacketOp<OP, DType, Arch>::kEnabled &&
      PacketCheck<TA, Arch>::kPass && PacketCheck<TB, Arch>::kPass;
};
//----------------------------------------------------
// Check if data is aligned and allow packet operation
//----------------------------------------------------
// default: unknown expressions are conservatively treated as unaligned
template<int dim, typename E, PacketArch Arch>
struct PacketAlignCheck {
  inline static bool Check(const E &exp) {
    return false;
  }
};
// scalars have no memory layout, so they are always "aligned"
template<int dim, typename DType, PacketArch Arch>
struct PacketAlignCheck<dim, ScalarExp<DType>, Arch> {
  inline static bool Check(const ScalarExp<DType> &exp) {
    return true;
  }
};
// a tensor is packetizable when both its base pointer and its row stride
// (in bytes) are alignment multiples
template<int dim, typename DType, PacketArch Arch>
struct PacketAlignCheck<dim, Tensor<cpu, dim, DType>, Arch> {
  inline static bool Check(const Tensor<cpu, dim, DType> &t) {
    return packet::CheckAlign<Arch>(t.dptr_) &&
        packet::CheckAlign<Arch>(t.stride_ * sizeof(DType));
  }
};
// a unary map is aligned iff its source is aligned
template<int dim, typename OP, typename TA, typename DType, int etype, PacketArch Arch>
struct PacketAlignCheck<dim, UnaryMapExp<OP, TA, DType, etype>, Arch> {
  inline static bool Check(const UnaryMapExp<OP, TA, DType, etype> &t) {
    return PacketAlignCheck<dim, TA, Arch>::Check(t.src_);
  }
};
// a binary map is aligned iff both operands are aligned
template<int dim, typename OP, typename TA, typename TB,
         typename DType, int etype, PacketArch Arch>
struct PacketAlignCheck<dim, BinaryMapExp<OP, TA, TB, DType, etype>, Arch> {
  inline static bool Check(const BinaryMapExp<OP, TA, TB, DType, etype> &t) {
    return PacketAlignCheck<dim, TA, Arch>::Check(t.lhs_) &&
        PacketAlignCheck<dim, TB, Arch>::Check(t.rhs_);
  }
};
/*!
 * \brief use PacketPlan to compute result
 * \param _dst destination tensor, written in place via saver SV
 * \param plan packetized evaluation plan for the right-hand side expression
 */
template<typename SV, typename E, int dim, typename DType, PacketArch Arch>
inline void MapPacketPlan(Tensor<cpu, dim, DType> _dst,
                          const expr::PacketPlan<E, DType, Arch>& plan) {
  Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
  // xlen: largest alignment-friendly column count; columns beyond it are
  // handled element-wise in the scalar tail loop below
  const index_t xlen = packet::LowerAlign<DType, Arch>(dst.size(1));
  const size_t packetSize = packet::Packet<DType, Arch>::size;
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  // rows are independent, so they parallelize without synchronization
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    // vectorized main loop: one packet per iteration
    for (index_t x = 0; x < xlen; x += packetSize) {
      packet::Saver<SV, DType, Arch>::Save(&dst[y][x], plan.EvalPacket(y, x));
    }
    // scalar tail loop for the remaining unaligned columns
    for (index_t x = xlen; x < dst.size(1); ++x) {
      SV::Save(dst[y][x], plan.Eval(y, x));
    }
  }
}
} // namespace expr
} // namespace mshadow
#endif // MSHADOW_PACKET_INL_H_
|
MaterialBlock.c | // -*-c++-*-
// SW4 LICENSE
// # ----------------------------------------------------------------------
// # SW4 - Seismic Waves, 4th order
// # ----------------------------------------------------------------------
// # Copyright (c) 2013, Lawrence Livermore National Security, LLC.
// # Produced at the Lawrence Livermore National Laboratory.
// #
// # Written by:
// # N. Anders Petersson (petersson1@llnl.gov)
// # Bjorn Sjogreen (sjogreen2@llnl.gov)
// #
// # LLNL-CODE-643337
// #
// # All rights reserved.
// #
// # This file is part of SW4, Version: 1.0
// #
// # Please also read LICENCE.txt, which contains "Our Notice and GNU General Public License"
// #
// # This program is free software; you can redistribute it and/or modify
// # it under the terms of the GNU General Public License (as published by
// # the Free Software Foundation) version 2, dated June 1991.
// #
// # This program is distributed in the hope that it will be useful, but
// # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
// # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
// # conditions of the GNU General Public License for more details.
// #
// # You should have received a copy of the GNU General Public License
// # along with this program; if not, write to the Free Software
// # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#include "MaterialBlock.h"
#include <iostream>
#include <cstdio>
#include "EW.h"
using namespace std;
//-----------------------------------------------------------------------
// Construct a constant-material block covering [xmin,xmax]x[ymin,ymax]x[zmin,zmax]
// with density rho, wave speeds vs/vp, attenuation qs/qp and reference
// frequency freq.  Also records whether the block covers the whole grid.
MaterialBlock::MaterialBlock( EW * a_ew, float_sw4 rho, float_sw4 vs, float_sw4 vp, float_sw4 xmin,
                              float_sw4 xmax, float_sw4 ymin, float_sw4 ymax, float_sw4 zmin, float_sw4 zmax,
                              float_sw4 qs, float_sw4 qp, float_sw4 freq )
{
   m_rho = rho;
   m_vp = vp;
   m_vs = vs;
   m_xmin = xmin;
   m_xmax = xmax;
   m_ymin = ymin;
   m_ymax = ymax;
   m_zmin = zmin;
   m_zmax = zmax;
   // geometric tolerance used by inside_block()
   m_tol = 1e-5;
   // depth gradients default to zero (constant material); see set_gradients()
   m_vpgrad = 0;
   m_vsgrad = 0;
   m_rhograd = 0;
   m_qs = qs;
   m_qp = qp;
   m_freq = freq;
   m_absoluteDepth = false;
   mEW = a_ew;

   float_sw4 bbox[6];
   mEW->getGlobalBoundingBox( bbox );
   // THE FOLLOWING ONLY WORKS IF m_absoluteDepth == true, or in the absence of topography
   if (!mEW->topographyExists())
   {
      // does this block cover all points?
      // note that the global_zmin can be non-zero when topography is present
      // global zmin is 0 in the absence of topography, and is assigned by the grid generator when there is topography
      if (xmin > bbox[0] || ymin > bbox[2] || zmin > bbox[4] ||
          xmax < bbox[1] || ymax < bbox[3] || zmax < bbox[5] )
      {
         mCoversAllPoints = false;
         // tmp
         // cout << "This block does NOT cover all grid points" << endl;
      }
      else
      {
         mCoversAllPoints=true;
         // tmp
         // cout << "This block COVERS all grid points" << endl;
      }
   }
   else // AP: with topography and m_absoluteDepth == false, it is hard to say if this block covers all points
   {
      mCoversAllPoints=false;
   }
}
//-----------------------------------------------------------------------
// Select whether z-coordinates are interpreted as absolute depth (true)
// or as depth below the free surface / topography (false).
void MaterialBlock::set_absoluteDepth( bool use_absolute )
{
   m_absoluteDepth = use_absolute;
}
//-----------------------------------------------------------------------
// Set the linear depth gradients applied to density and wave speeds when
// material properties are assigned in set_material_properties().
void MaterialBlock::set_gradients( float_sw4 grad_rho, float_sw4 grad_vs, float_sw4 grad_vp )
{
   m_rhograd = grad_rho;
   m_vsgrad  = grad_vs;
   m_vpgrad  = grad_vp;
}
//-----------------------------------------------------------------------
// True when (x,y,z) lies within the block extent, widened by the tolerance
// m_tol on every side.
bool MaterialBlock::inside_block( float_sw4 x, float_sw4 y, float_sw4 z )
{
   bool in_x = (m_xmin-m_tol <= x) && (x <= m_xmax+m_tol);
   bool in_y = (m_ymin-m_tol <= y) && (y <= m_ymax+m_tol);
   bool in_z = (m_zmin-m_tol <= z) && (z <= m_zmax+m_tol);
   return in_x && in_y && in_z;
}
//-----------------------------------------------------------------------
// Assign this block's material values (density rho, wave speeds cs/cp and,
// when defined, attenuation qs/qp) to every grid point whose (x,y,depth)
// lies inside the block, applying the linear depth gradients.  Cartesian
// grids are processed first, then the curvilinear (topography) grid.
// NOTE(review): a property value of -1 appears to mean "not specified by
// this block" (assignment is skipped) — verify against the input parser.
void MaterialBlock::set_material_properties( std::vector<Sarray> & rho,
                                             std::vector<Sarray> & cs,
                                             std::vector<Sarray> & cp,
                                             std::vector<Sarray> & qs,
                                             std::vector<Sarray> & qp)
{
   // int pc[4];
   // compute the number of parallel overlap points
   // mEW->interiorPaddingCells( pc );

   // per-rank counters of points assigned / skipped; globally summed via
   // MPI_Reduce at the end for the diagnostic printout
   int material=0, outside=0;
   for( int g = 0 ; g < mEW->mNumberOfCartesianGrids; g++) // Cartesian grids
   {
#pragma omp parallel
      {
         // reference z-level for gradients is at z=0: AP changed this on 12/21/09
         float_sw4 zsurf = 0.; // ?
         // the following pragma causes an internal error for the cray compiler?
#pragma omp for reduction (+:material,outside)
         for( int k = mEW->m_kStart[g]; k <= mEW->m_kEnd[g]; k++ )
         {
            for( int j = mEW->m_jStartInt[g]; j <= mEW->m_jEndInt[g]; j++ )
            {
//#pragma simd
#pragma ivdep
               for( int i = mEW->m_iStartInt[g]; i <= mEW->m_iEndInt[g] ; i++ )
               {
                  // Cartesian coordinates from the (1-based) grid indices
                  float_sw4 x = (i-1)*mEW->mGridSize[g] ;
                  float_sw4 y = (j-1)*mEW->mGridSize[g] ;
                  float_sw4 z = mEW->m_zmin[g]+(k-1)*mEW->mGridSize[g];
                  //printf("x ,y,z %f %f %f %f\n",x,y,z,mEW->m_zmin[g]);
                  float_sw4 depth;
                  if (m_absoluteDepth)
                  {
                     depth = z;
                  }
                  else
                  {
                     // depth below the free surface, as defined by EW
                     mEW->getDepth(x, y, z, depth);
                  }
                  // the block's z-extent is tested against depth, not z
                  if(inside_block(x,y,depth))
                  {
                     if( m_rho != -1 )
                        rho[g](i,j,k) = m_rho + m_rhograd*(depth-zsurf);
                     if( m_vs != -1 )
                        cs[g](i,j,k) = m_vs + m_vsgrad*(depth-zsurf);
                     if( m_vp != -1 )
                        cp[g](i,j,k) = m_vp + m_vpgrad*(depth-zsurf);
                     if( m_qp != -1 && qp[g].is_defined())
                        qp[g](i,j,k) = m_qp;
                     if( m_qs != -1 && qs[g].is_defined())
                        qs[g](i,j,k) = m_qs;
                     material++;
                  }
                  else
                  {
                     outside++;
                     if (mEW->getVerbosity() > 2)
                     {
                        printf("Point (i,j,k)=(%i, %i, %i) in grid g=%i\n"
                               "with (x,y,z)=(%e,%e,%e) and depth=%e\n"
                               "is outside the block domain: %e<= x <= %e, %e <= y <= %e, %e <= depth <= %e\n",
                               i, j, k, g,
                               x, y, z, depth,
                               m_xmin, m_xmax, m_ymin, m_ymax, m_zmin, m_zmax);
                     }
                  } // end ! inside_block
               } // end for i
            }// end for j
         } // end for k
      } // end pragma omp parallel
      // communicate material properties to ghost points (necessary on refined meshes because ghost points don't have a well defined depth/topography)
      mEW->communicate_array( rho[g], g );
      mEW->communicate_array( cs[g], g );
      mEW->communicate_array( cp[g], g );
      if (qs[g].is_defined())
         mEW->communicate_array( qs[g], g );
      if (qp[g].is_defined())
         mEW->communicate_array( qp[g], g );
   } // end for all Cartesian grids

   if (mEW->topographyExists()) // curvilinear grid
   {
#pragma omp parallel
      {
         // the curvilinear grid is always the last grid
         int g = mEW->mNumberOfGrids-1;
         // reference z-level for gradients is at z=0: AP changed this on 12/21/09
         float_sw4 zsurf = 0.;
         // the following pragma causes an internal error for the cray compiler?
#pragma omp for reduction (+:material,outside)
         for( int k = mEW->m_kStart[g] ; k <= mEW->m_kEnd[g]; k++ )
         {
            for( int j = mEW->m_jStart[g] ; j <= mEW->m_jEnd[g]; j++ )
            {
//#pragma simd
#pragma ivdep
               for( int i = mEW->m_iStart[g] ; i <= mEW->m_iEnd[g] ; i++ )
               {
                  // curvilinear coordinates come from the mapped grid arrays
                  float_sw4 x = mEW->mX(i,j,k);
                  float_sw4 y = mEW->mY(i,j,k);
                  float_sw4 z = mEW->mZ(i,j,k);
                  //printf("x ,y,z %f %f %f %f\n",x,y,z,mEW->m_zmin[g]);
                  float_sw4 depth;
                  if (m_absoluteDepth)
                  {
                     depth = z;
                  }
                  else
                  {
                     // depth measured from the topography surface (k=1 plane)
                     depth = z - mEW->mZ(i,j,1);
                  }
                  if(inside_block(x,y,depth))
                  {
                     if( m_rho != -1 )
                        rho[g](i,j,k) = m_rho + m_rhograd*(depth-zsurf);
                     if( m_vs != -1 )
                        cs[g](i,j,k) = m_vs + m_vsgrad*(depth-zsurf);
                     if( m_vp != -1 )
                        cp[g](i,j,k) = m_vp + m_vpgrad*(depth-zsurf);
                     if( m_qp != -1 && qp[g].is_defined())
                        qp[g](i,j,k) = m_qp;
                     if( m_qs != -1 && qs[g].is_defined())
                        qs[g](i,j,k) = m_qs;
                     material++;
                  }
                  else
                  {
                     if (mEW->getVerbosity() > 2)
                     {
                        printf("Point (i,j,k)=(%i, %i, %i) in grid g=%i\n"
                               "with (x,y,z)=(%e,%e,%e) and depth=%e\n"
                               "is outside the block domain: %e<= x <= %e, %e <= y <= %e, %e <= depth <= %e\n",
                               i, j, k, g,
                               x, y, z, depth,
                               m_xmin, m_xmax, m_ymin, m_ymax, m_zmin, m_zmax);
                     }
                     outside++;
                  }
               }
            }
         }
      }
   } // end if topographyExists

   // global diagnostic: how many points were assigned / skipped overall
   int outsideSum, materialSum;
   MPI_Reduce(&outside, &outsideSum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
   MPI_Reduce(&material, &materialSum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
   if (mEW->getVerbosity() >=2 && mEW->m_myrank == 0)
      cout << "block command: outside = " << outsideSum << ", " << "material = " << materialSum << endl;
} // end MaterialBlock::set_material_properties
|
GB_binop__eq_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int32)
// A*D function (colscale): GB (_AxD__eq_int32)
// D*A function (rowscale): GB (_DxB__eq_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int32)
// C=scalar+B GB (_bind1st__eq_int32)
// C=scalar+B' GB (_bind1st_tran__eq_int32)
// C=A+scalar GB (_bind2nd__eq_int32)
// C=A'+scalar GB (_bind2nd_tran__eq_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT32 || GxB_NO_EQ_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Stub: this variant is compiled out (#if 0) for this operator, so no
// dense ewise3-with-accumulator kernel is generated.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Body comes from the shared template, specialized by the GB_* macros above.
GrB_Info GB (_Cdense_ewise3_noaccum__eq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Stub: the template body is compiled out (#if 0) for this operator, so the
// function reports success without modifying C.
GrB_Info GB (_Cdense_accumB__eq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Stub: the template body is compiled out (#if 0) for this operator, so the
// function reports success without modifying C.
GrB_Info GB (_Cdense_accumb__eq_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's value array for the shared colscale template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's value array for the shared rowscale template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces used by the add template; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for EQ (defined above), so only the non-flipped
    // branch below is compiled for this operator.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Compute Cx [p] = (x == Bx [p]) for every entry present in B, with the
    // scalar x bound to the first operand.  Entries absent from the bitmap
    // Bb are left untouched.
    bool *Cx = (bool *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only touch entries present in the bitmap
        if (GBB (Bb, p))
        {
            int32_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x == bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Compute Cx [p] = (Ax [p] == y) for every entry present in A, with the
    // scalar y bound to the second operand.  Entries absent from the bitmap
    // Ab are left untouched.
    bool *Cx = (bool *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only touch entries present in the bitmap
        if (GBB (Ab, p))
        {
            int32_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP plugs the binary-op application cij = (x == aij) into the
// generic transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x
// bound to the first operand.
GrB_Info GB (_bind1st_tran__eq_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: re-establish GB_ATYPE for any subsequent kernels
// (the value is unchanged here)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP plugs the binary-op application cij = (aij == y) into the
// generic transpose template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y
// bound to the second operand.
GrB_Info GB (_bind2nd_tran__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
// EdgeInfo: one monotonic edge of a polygon, in the sorted rendering form
// built by ConvertPathToPolygon() below.
typedef struct _EdgeInfo
{
SegmentInfo
bounds;            // x1/x2: x extent of the edge; y1/y2: first/last point y
double
scanline;          // per-edge scan state (stored as -1.0); renderer use not visible here
PointInfo
*points;           // heap-allocated vertex array (owned; freed by DestroyEdge/DestroyPolygonInfo)
size_t
number_points;     // count of valid entries in points[]
ssize_t
direction;         // 1 if the edge runs "down" (increasing y), 0 if "up" (see LogPolygonInfo)
MagickBooleanType
ghostline;         // MagickTrue for the implicit closing line of an open subpath
size_t
highwater;         // scan-resume index (initialized to 0); renderer use not visible here
} EdgeInfo;
// ElementInfo: center, axes, and rotation of an ellipse-like element.
// NOTE(review): not referenced in this chunk -- semantics inferred from the
// field names; confirm against callers.
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
// MVGInfo: working state threaded through the Trace*() primitive builders:
// a growable PrimitiveInfo array plus the current write offset.
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;  // address of the growable primitive array
size_t
*extent;           // address of the array's allocated capacity
ssize_t
offset;            // current write position within the array
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
// PolygonInfo: a polygon as a sorted set of monotonic edges.
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
// PathInfoCode: per-vertex opcode in the vector-path form produced by
// ConvertPrimitiveToPath().
typedef enum
{
MoveToCode,        // start of a (closed) subpath
OpenCode,          // retroactively marks the start of an open subpath
GhostlineCode,     // begins the implicit closing line of an open subpath
LineToCode,
EndCode            // terminator record
} PathInfoCode;
// PathInfo: one vector-path record: a point plus its opcode.
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
/*
  Allocate a new DrawInfo and initialize it to default values via
  GetDrawInfo() with a NULL ImageInfo.
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
// NOTE(review): AcquireCriticalMemory presumably aborts on allocation
// failure, so the result is never NULL -- confirm.
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  Deep-copy draw_info into a freshly allocated DrawInfo.  If draw_info is
  NULL, the clone simply holds the defaults set by GetDrawInfo().  Owned
  strings, pattern/mask images, the dash pattern, and the gradient stops
  are all duplicated so the clone can be destroyed independently.
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
/*
  Duplicate owned strings; plain members are copied by assignment.
*/
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
/*
  The dash pattern is terminated by a near-zero entry; x counts the live
  entries.  The clone is over-allocated to 2*x+2 zeroed slots but only
  x+1 entries (pattern plus terminator) are copied.
  NOTE(review): the 2*x+2 size presumably leaves headroom for later
  pattern doubling -- confirm against the dash-drawing code.
*/
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
// NOTE(review): this message reuses the dash-pattern resource name for
// the gradient-stop allocation -- likely copy/paste; confirm upstream.
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
// debug reflects the current logging configuration, not the source's flag
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator for EdgeInfo records.  Edges always hold at least two
  points (ConvertPathToPolygon only stores edges with n >= 2), so p[1]/q[1]
  are safe to read.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
// The macro returns from the enclosing function on strict inequality and
// falls through on a tie, so successive invocations form a lexicographic
// comparison chain.  It stays defined for the remainder of the file.
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
// Sort keys in order: first-point y, first-point x, cross product of the
// initial segment directions, then second-point y and x.
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Emit every edge of the polygon (direction, ghostline flag, bounds, and
  each vertex) to the draw event log; debug tracing only.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  /*
    Reverse the point list in place, swapping from both ends toward the
    middle.  A list of zero or one points is left untouched.
  */
  for (head=0, tail=(ssize_t) number_points-1; head < tail; head++, tail--)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
  }
}
/*
  Convert a PathInfo vector into the sorted-edge PolygonInfo form: the path
  is split at direction changes into monotonic edges (points reversed so
  each edge runs top-to-bottom), per-edge x/y bounds are recorded, and the
  edge array is sorted with DrawCompareEdges.  Returns NULL on allocation
  failure.
  NOTE(review): the failure returns below leave previously acquired memory
  (polygon_info, its edges array, and any in-progress points array)
  unreleased -- confirm whether upstream has since added cleanup here.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
// slot 0 is pre-initialized empty in case the path contributes no edges
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
// flush the edge in progress (if it has at least two points) ...
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
// store edges top-to-bottom: reverse "up" runs
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
// ... then start a new point buffer for the next subpath
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
// direction of this segment: +1 when y increases (ties broken by x)
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
// the path reversed vertical direction: close out the current edge and
// start a new one sharing its last point
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
// grow the point buffer geometrically as the edge accumulates vertices
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
// flush the final edge (or discard a degenerate single-point run)
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
// sort edges for top-to-bottom, left-to-right scan conversion
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  Emit each record of a PathInfo vector (point plus decoded opcode) to the
  draw event log until the EndCode terminator; debug tracing only.
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
/*
  Convert a PrimitiveInfo point array into the PathInfo vector form:
  consecutive duplicate points are elided, each subpath begins with a
  MoveToCode record (retagged OpenCode once the subpath is known to be
  open), and open subpaths get an explicit ghostline back to their start
  point.  Returns NULL for primitives that carry no path geometry or on
  allocation failure.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,          // start point of the current subpath
q;          // most recently emitted point (for duplicate elision)
register ssize_t
i,
n;
ssize_t
coordinates,    // points remaining in the current subpath
start;          // index in path_info of the current subpath's first record
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
// worst-case size: every point, plus ghostline records, plus terminator
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the p point as open if the subpath is not closed.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
// trim the buffer to the n+1 records actually written
// NOTE(review): if this resize fails it returns NULL and the original
// buffer leaks -- confirm callers treat NULL as failure.
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  Free draw_info and everything it owns: strings, pattern and mask images,
  the dash pattern, and the gradient stops.  The signature is inverted
  before the final release so a stale pointer trips the signature assert.
  Returns NULL so callers can reset their pointer in one statement.
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
// invalidate the signature to catch use-after-destroy
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    tail;

  /*
    Release the point list of the given edge and close the gap in the edge
    array by shifting the trailing edges down one slot.  Returns the new
    edge count.
  */
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  tail=polygon_info->number_edges-edge;
  if (tail > 0)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      tail*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register EdgeInfo
    *edge;

  register ssize_t
    remaining;

  /*
    Release each edge's point list, then the edge array, then the polygon
    structure itself.  Always returns NULL so callers can reset their
    pointer.
  */
  edge=polygon_info->edges;
  for (remaining=(ssize_t) polygon_info->number_edges; remaining > 0;
       remaining--)
  {
    edge->points=(PointInfo *) RelinquishMagickMemory(edge->points);
    edge++;
  }
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Tighten the horizontal span [edge->x1,edge->x2] on scanline y so that the
  affine image of each x stays inside the source image: first against the
  column interval [0,columns) using the x-row of the transform (sx,ry,tx),
  then against the row interval [0,rows) using the y-row (rx,sy,ty).  When
  a coefficient is ~0 the span is either wholly in or wholly out at this y;
  the out case returns early with a degenerate span.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
// solve sx*x+z == 0 and sx*x+z == columns for the span limits
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
// negative coefficient: the two intercepts swap roles
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
// sx ~ 0 and the constant term falls outside the columns: no overlap
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
// NOTE(review): this reject branch sets x2=edge->x2 while the columns
// branch above sets x2=edge->x1 -- asymmetric; confirm against upstream.
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  /*
    Compute the inverse of a 2x3 affine transform.  The 2x2 linear part is
    inverted via its adjugate scaled by the reciprocal determinant; the
    translation is then mapped through the inverted linear part.  A singular
    matrix yields a zero reciprocal via PerceptibleReciprocal().
  */
  AffineMatrix
    inverse_affine;
  double
    reciprocal_determinant;
  reciprocal_determinant=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  inverse_affine.sx=affine->sy*reciprocal_determinant;
  inverse_affine.rx=(-affine->rx)*reciprocal_determinant;
  inverse_affine.ry=(-affine->ry)*reciprocal_determinant;
  inverse_affine.sy=affine->sx*reciprocal_determinant;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;
  CacheView
    *image_view,
    *source_view;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    extent[4],
    min,
    max;
  register ssize_t
    i;
  SegmentInfo
    edge;
  ssize_t
    start,
    stop,
    y;
  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* forward-map the four source corners to find the destination extent */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  /* axis-aligned bounding box of the transformed corners */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* clamp the bounding box to the destination image */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  /* one scanline per iteration: clip via AffineEdge(), then inverse-map each
     destination pixel into the source and composite the interpolated color */
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;
    PointInfo
      point;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    SegmentInfo
      inverse_edge;
    ssize_t
      x_offset;
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    /* NOTE(review): x_offset is maintained but never read below */
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  /*
    Clamp the stroke width to a maximum proportional to the larger image
    dimension so an absurd width cannot blow up the rendering cost.
  */
  double
    maximum_width;
  maximum_width=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,maximum_width));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PointInfo
    end,
    resolution,
    start;
  PrimitiveInfo
    primitive_info[6];
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    coordinates;
  /*
    Debug aid: outline each polygon edge (red = one direction, green = the
    other) and the overall bounds (blue) on the image.
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* default 96 DPI unless the draw info carries an explicit density */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      MagickStatusType
        flags;
      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* half the effective (scaled, clamped) stroke width */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounds, expanded by mid and clamped to the image */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* one rectangle per edge, colored by edge direction */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* loop exited early on error */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* finally outline the overall bounds in blue */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;
  Image
    *clipping_mask;
  MagickBooleanType
    status;
  /* the MVG clip path is stored as an image artifact keyed by id */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  /* render the path into a grayscale mask image */
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
    exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  /* install the mask so subsequent writes are clipped */
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClippingMask() renders the MVG clip path into a grayscale mask image the
  size of the canvas and returns it (caller owns the result), or NULL on
  failure.

  Fix: if NegateImage() failed, clip_mask was destroyed inside the
  separate_mask branch and then destroyed AGAIN by the unconditional
  status check that follows -- DestroyImage() asserts a non-NULL image, so
  this was a double destroy.  The final check now guards against NULL.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;
  Image
    *clip_mask,
    *separate_mask;
  MagickStatusType
    status;
  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* transparent canvas: pixels covered by the path become opaque below */
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* render the path with white fill and no stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* reduce the rendering to its alpha channel, inverted */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  /* guard: clip_mask may already be NULL if NegateImage() failed above */
  if ((status == MagickFalse) && (clip_mask != (Image *) NULL))
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawCompositeMask() renders the MVG mask path into a grayscale mask image
  the size of the canvas and returns it (caller owns the result), or NULL on
  failure.

  Fix: if NegateImage() failed, composite_mask was destroyed inside the
  separate_mask branch and then destroyed AGAIN by the unconditional status
  check that follows -- DestroyImage() asserts a non-NULL image, so this was
  a double destroy.  The final check now guards against NULL.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* transparent canvas: pixels covered by the path become opaque below */
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* render the path with white fill and no stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* reduce the rendering to its alpha channel, inverted */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  /* guard: composite_mask may already be NULL if NegateImage() failed */
  if ((status == MagickFalse) && (composite_mask != (Image *) NULL))
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PrimitiveInfo
    *dash_polygon;
  register double
    dx,
    dy;
  register ssize_t
    i;
  size_t
    number_vertices;
  ssize_t
    j,
    n;
  /*
    Walk the polyline, slicing each segment into dash/gap runs driven by
    draw_info->dash_pattern (n indexes the pattern, j the vertices collected
    for the current dash); each completed dash is stroked immediately via
    DrawStrokePolygon().
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* scratch buffer large enough for split vertices plus slack */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /* consume the dash offset: advance n/length until offset is used up */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* pattern entry exhausted: advance, wrapping at the terminator */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    /* emit whole pattern entries that fit in this segment */
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* odd entry: start a new dash at the interpolated point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* even entry: close the current dash and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the leftover of the current entry into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* flush a trailing, partially-drawn dash */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetStopColorOffset() maps pixel (x,y) to its position along the gradient.
  Linear: the scalar projection of the pixel (relative to the gradient start)
  onto the gradient vector -- gamma*scale*length reduces algebraically to
  (p.q)/|p|.  Radial: plain distance from the center for RepeatSpread,
  otherwise the rotated, radii-normalized elliptical distance.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;
      PointInfo
        p,
        q;
      const SegmentInfo
        *gradient_vector;
      gradient_vector=(&gradient->gradient_vector);
      /* p: gradient direction; q: pixel relative to gradient start */
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;
      if (gradient->spread == RepeatSpread)
        {
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate by the gradient angle and normalize by the ellipse radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: order gradient stops by ascending offset.  The
    greater-than test runs first so only strictly-lesser offsets (beyond
    the epsilon equality band) fall through to -1.
  */
  const StopInfo
    *stop_1 = (const StopInfo *) x,
    *stop_2 = (const StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const GradientInfo
    *gradient;
  const SegmentInfo
    *gradient_vector;
  double
    length;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    point;
  RectangleInfo
    bounding_box;
  ssize_t
    y;
  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* sort stops by ascending offset so the lookup loops below terminate */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;
    PixelInfo
      composite,
      pixel;
    register Quantum
      *magick_restrict q;
    register ssize_t
      i,
      x;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* seed the offset for x == 0; linear offsets are normalized by length */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* clamp: offsets outside [0,1] take the first/last stop color */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* blend linearly between the two bracketing stops */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* mirror: even periods run forward, odd periods run backward */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;
          MagickBooleanType
            antialias;
          /* tile the gradient; antialias blends across each period seam */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* at the seam, blend last stop into first stop */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CheckPrimitiveExtent() guarantees room for `pad` more primitives beyond the
  current offset, growing *mvg_info->primitive_info as needed.  On growth the
  new tail is poisoned with UndefinedPrimitive so an unterminated list cannot
  be walked past the end.  On allocation failure the buffer is replaced by a
  minimal one so callers can unwind, and MagickFalse is returned.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;
  size_t
    quantum;
  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /* the size math is done in double to detect overflow before allocating */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;
          *mvg_info->extent=(size_t) extent;
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: compare two MVG macro names as C strings.
  */
  return(strcmp((const char *) target,(const char *) source));
}
/*
  GetMVGMacros() scans an MVG program for named `push` blocks (e.g.
  `push graphic-context "wheel" ... pop graphic-context`) and returns a splay
  tree mapping each name to the MVG text between its push and matching pop.
  Returns NULL if primitive is NULL; the caller owns the tree.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;
  const char
    *q;
  size_t
    extent;
  SplayTreeInfo
    *macros;
  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;
        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];
            const char
              *p;
            ssize_t
              n;
            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting; the macro ends when it returns to 0 */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  /*
    Return MagickTrue if the string parses as a coordinate value: either the
    converted value is non-zero (beyond MagickEpsilon) or the conversion
    consumed at least one character.
  */
  char
    *terminator;
  double
    value;
  value=StringToDouble(point,&terminator);
  if ((fabs(value) < MagickEpsilon) && (terminator == point))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Emit a single-coordinate, open primitive at the given point.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Public entry point: render the MVG primitives in draw_info onto image by
    delegating to RenderMVGContent at recursion depth 0.
  */
  return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the rendered pattern image is returned in this argument.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *pattern_info;

  ImageInfo
    *acquire_info;

  MagickBooleanType
    status;

  /*
    Render the MVG path registered as an image artifact under `name` into a
    freshly acquired *pattern image sized by the "<name>-geometry" artifact.
    Returns MagickFalse when either artifact is missing.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a transparent canvas of the
    requested geometry.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  acquire_info=AcquireImageInfo();
  acquire_info->size=AcquireString(geometry);
  *pattern=AcquireImage(acquire_info,exception);
  acquire_info=DestroyImageInfo(acquire_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render with a clone of draw_info so the caller's fill/stroke patterns and
    primitive string are untouched; honor an optional "<name>-type" gradient.
  */
  pattern_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  pattern_info->fill_pattern=NewImageList();
  pattern_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    pattern_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&pattern_info->primitive,path);
  status=RenderMVGContent(*pattern,pattern_info,0,exception);
  pattern_info=DestroyDrawInfo(pattern_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release each per-thread PolygonInfo, then the holder array itself.
    Returns NULL so callers can re-assign the freed pointer in one statement.
    The thread-resource limit is hoisted out of the loop: the original
    re-queried GetMagickResourceLimit() on every iteration.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  GetFillAlpha() returns the fill opacity (0..1) of pixel (x,y) against the
  polygon edge table, and stores the stroke opacity in *stroke_alpha.  `mid`
  is half the (affine-scaled) stroke width; `fill_rule` selects even-odd vs
  non-zero winding for interior classification.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;
  PointInfo
    delta;
  register const PointInfo
    *q;
  register EdgeInfo
    *p;
  register ssize_t
    i;
  ssize_t
    j,
    winding_number;
  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges appear y-sorted: once the scanline is above this edge's top
       (with stroke margin), no later edge can contribute either. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* Scanline is past this edge; release it so later rows skip it. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* highwater caches where the previous scanline stopped, so each edge's
       point list is walked at most once per row. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta is the (unnormalized) projection of (x,y) onto the segment. */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Projection falls before the segment start: distance to q. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Projection falls past the segment end: distance to q+1. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular squared distance to the segment interior. */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* Points within mid of the edge are stroked; the 0.25/0.5 terms
             feather the stroke boundary for antialiasing. */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      /* beta may still hold sqrt(distance) from the stroke branch above;
         recompute only when it was left at zero. */
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross product decides which side of the edge the sample lies on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: interior when the winding number is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    /* Non-zero rule: interior when the winding number is non-zero. */
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
/*
  DrawPolygonPrimitive() rasterizes a polygon (or line/point) primitive:
  it builds a per-thread polygon edge table, computes the primitive's
  bounding box, then composites fill and stroke color over each pixel row
  inside that box.  Returns MagickTrue on success.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    fill,
    status;
  double
    mid;
  PolygonInfo
    **magick_restrict polygon_info;
  register EdgeInfo
    *p;
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    start_y,
    stop_y,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* Debug-only dead branch: kept compiled to catch bit-rot in
     DrawBoundingRectangles(); never executed at runtime. */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid is half the stroke width scaled by the affine transform. */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* Union of all edge bounding boxes gives the primitive's extent. */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* Pad by the stroke half-width plus one pixel of antialias margin. */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp the box to the image frame. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;
        PixelInfo
          pixel;
        register ssize_t
          x;
        register Quantum
          *magick_restrict q;
        ssize_t
          start_x,
          stop_x;
        /* status is shared across threads: bail out of remaining rows once
           any row has failed. */
        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the single pixel nearest the primitive point is painted. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    /* Each thread rasterizes whole rows with its own PolygonInfo copy
       (indexed by OpenMP thread id), so edge-state mutation is private. */
    const int
      id = GetOpenMPThreadId();
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    ssize_t
      start_x,
      stop_x;
    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;
      PixelInfo
        fill_color,
        stroke_color;
      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Without antialiasing, threshold coverage to on/off. */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
  /*
    Clamp a drawing coordinate into the ssize_t-representable range, leaving
    a 512-unit safety margin so later integer rounding cannot overflow.
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    current,
    last,
    subpath_start;

  register ssize_t
    i,
    x;

  ssize_t
    remaining,
    y;

  /*
    Trace the primitive list to the drawing event log.  Single-point
    primitives (alpha, color, image, point, text) log one summary line;
    every other primitive logs its full coordinate list, marking repeated
    consecutive points as duplicates.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Walk the coordinate list; `remaining` counts down the points left in the
    current subpath, `subpath_start` remembers its first point so a closed
    subpath ("end last") can be told apart from an open one ("end open").
  */
  remaining=0;
  subpath_start=primitive_info[0].point;
  last.x=(-1.0);
  last.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    current=primitive_info[i].point;
    if (remaining <= 0)
      {
        remaining=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    begin open (%.20g)",(double) remaining);
        subpath_start=current;
      }
    if ((fabs(last.x-current.x) >= MagickEpsilon) ||
        (fabs(last.y-current.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %.18g,%.18g",(double) remaining,current.x,current.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %g %g (duplicate)",(double) remaining,current.x,
        current.y);
    last=current;
    remaining--;
    if (remaining > 0)
      continue;
    if ((fabs(subpath_start.x-current.x) >= MagickEpsilon) ||
        (fabs(subpath_start.y-current.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end last (%.20g)",
        (double) remaining);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end open (%.20g)",
        (double) remaining);
  }
}
/*
  DrawPrimitive() renders a single graphic primitive (alpha, color, image,
  point, text, or a traced polygon-based shape) on the image as directed by
  draw_info.  Returns MagickTrue if the primitive renders without error.

  Note: the locals x and y first hold the primitive's anchor coordinate and
  are later reused as raster loop indices inside the Replace/Reset methods.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A non-gray fill or stroke on a grayscale image forces a conversion to
    sRGB so the color renders faithfully.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      /*
        Install the SVG clipping/composite masks; they are removed again at
        the bottom of this function.
      */
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  /*
    Anchor pixel of the primitive, constrained to a sane coordinate range.
  */
  x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
  y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /*
            Set the alpha of the single pixel at (x,y).
          */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /*
            Set the alpha of every pixel that fuzzy-matches the color found
            at the anchor pixel.
          */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /*
            Flood-fill alpha from the anchor pixel; FillToBorderMethod fills
            until the border color is reached instead of matching the target.
          */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /*
            Restrict painting to the alpha channel for the fill, then restore
            the previous channel mask.
          */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /*
            Set the alpha of every pixel in the image.
          */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same method dispatch as AlphaPrimitive, but writes the full fill
        color rather than just the alpha channel.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /*
        Read an image (inline data: URI or filename held in the primitive's
        text field) and composite it at the anchor coordinate.
      */
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /*
        Keep only the first frame; discard the rest of the list.
      */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /*
        Over-type composition goes through the affine renderer; every other
        compose operator uses a straight composite.
      */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        (void) DrawAffineImage(image,composite_image,&affine,exception);
      else
        (void) CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /*
        Composite the fill color over the single pixel at (x,y); silently
        ignore points outside the image bounds.
      */
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /*
        Delegate text rendering to AnnotateImage() with a cloned draw info
        positioned at the primitive's point.
      */
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /*
        Polygon-based primitive: fill first (with stroking suppressed when a
        separate stroke pass is needed), then apply dashes or strokes.
      */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          /* NOTE(review): plain assignment discards the accumulated status
             from the fill pass above — confirm `status&=` was not intended. */
          status=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      /*
        Remove the SVG masks installed above.
      */
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Build a degenerate four-point polygon centered on the endpoint (each
    corner nudged by 2*MagickEpsilon) and hand it to the polygon renderer,
    which paints it as a round cap.
  */
  i=0;
  while (i < 4)
    linecap[i++]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *subpath,
    *tail;

  /*
    Render the stroke of each subpath by tracing its outline polygon and
    filling that outline with the stroke color/pattern; stroking itself is
    disabled on the clone so only the fill pass runs.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  subpath=primitive_info;
  while (subpath->primitive != UndefinedPrimitive)
  {
    /*
      Single-coordinate subpaths have no stroke outline; skip them.
    */
    if (subpath->coordinates != 1)
      {
        stroke_polygon=TraceStrokePolygon(image,draw_info,subpath);
        if (stroke_polygon == (PrimitiveInfo *) NULL)
          {
            status=0;
            break;
          }
        status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,
          exception);
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        if (status == 0)
          break;
        tail=subpath+subpath->coordinates-1;
        closed_path=subpath->closed_subpath;
        if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
          {
            /*
              Open paths with round caps get an explicit cap at each end.
            */
            status&=DrawRoundLinecap(image,draw_info,subpath,exception);
            status&=DrawRoundLinecap(image,draw_info,tail,exception);
          }
      }
    subpath+=subpath->coordinates;
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the affine matrix to the identity transform: zero everything,
    then set the two scale terms to 1.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  GetDrawInfo() initializes draw_info to its default values (black fill,
  transparent-white stroke, butt caps, miter joins, 12pt text, ...), then
  overrides selected attributes from image_info options such as "fill",
  "stroke", "gravity", "kerning", and "weight".
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Default fill: opaque black; default stroke: fully transparent white. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit attributes carried on the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply any image-info "define" options that affect drawing.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* "weight" may be a symbolic name or a bare number. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation (binomial coefficient) of (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    binomial;

  register ssize_t
    i;

  /*
    Binomial coefficient C(n,k) = n!/(k!*(n-k)!), computed as the product
    (k+1)*(k+2)*...*n divided by (n-k)!.
  */
  binomial=1.0;
  i=k+1;
  while (i <= n)
    binomial*=(double) i++;
  i=1;
  while (i <= (n-k))
    binomial/=(double) i++;
  return(binomial);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  /*
    An arc is traced as a slice of the ellipse centered on the midpoint of
    the chord, with radii equal to the chord's half-extents.
  */
  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}
/*
  TraceArcPath() traces an SVG-style elliptical arc path segment from start
  to end with the given per-axis radii (arc), x-axis rotation angle, and the
  large-arc/sweep flags.  The arc is split into at most quarter-turn slices,
  each approximated by a 4-point bezier via TraceBezier().  Degenerate arcs
  collapse to a point or a straight line.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Coincident endpoints degenerate to a single point.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /*
    A zero radius degenerates to a straight line.
  */
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Rotate the chord midpoint into the ellipse's axis-aligned frame.
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Scale the radii up if they are too small to span the endpoints.
  */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Map the endpoints onto the unit circle of the (rotated) ellipse.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Choose the arc center consistent with the sweep/large-arc flags.
  */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    One bezier segment per (up to) quarter-turn of sweep.
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /*
      Control-point offset (gamma) for the cubic approximation of this
      angular slice; points[0..2] are the segment's unit-circle control
      points, mapped back through the radii and rotation below.
    */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* Force the final segment to land exactly on the requested endpoint. */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may reallocate the buffer; refresh p from mvg_info. */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /*
    Rewind to the start of the traced run and stamp its total coordinate
    count and primitive type (propagated backwards over every point).
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceBezier() converts the bezier defined by the number_coordinates control
  points already stored at the current mvg_info offset into a polyline of
  short segments, writing the segments back over the same offset.  Returns
  MagickFalse on allocation failure or extent overflow.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the segment count (quantum) to the largest pairwise distance
    between control points, rejecting distances that overflow ssize_t.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  /*
    Ensure the primitive buffer can hold the expanded polyline (+1 for the
    explicit endpoint), then refresh primitive_info: the buffer may move.
  */
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* Binomial coefficients of the Bernstein basis. */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* Evaluate the curve at parameter `weight` via the Bernstein form. */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* Append the exact endpoint so the curve terminates precisely. */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Propagate the primitive type backwards over every traced point. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy,
    radius;

  PointInfo
    offset,
    degrees;

  /*
    A circle is a full-sweep (0..360 degree) ellipse centered at `start`
    whose radius is the distance to the perimeter point `end`.
  */
  dx=end.x-start.x;
  dy=end.y-start.y;
  radius=hypot(dx,dy);
  degrees.x=0.0;
  degrees.y=360.0;
  offset.x=radius;
  offset.y=radius;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}
/*
  TraceEllipse() traces the elliptical arc centered at `center` with the
  given radii, sweeping from arc.x to arc.y degrees, as a polyline of short
  segments written at the current mvg_info offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* A degenerate (zero-radius) ellipse traces nothing. */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /*
    Choose an angular step inversely proportional to the larger radius so
    large ellipses get proportionally more segments (capped at PI/8).
  */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  /* Normalize the end angle so the sweep is always forward. */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may reallocate; refresh primitive_info. */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Emit the exact end-angle point so the arc terminates precisely. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* A full sweep ends where it began; mark the subpath closed. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type backwards over every traced point. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceLine() traces a line segment from 'start' to 'end' into the primitive
  list.  A degenerate segment, whose endpoints lie within MagickEpsilon of
  each other in both x and y, collapses to a single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    coincident;

  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  coincident=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (coincident != MagickFalse)
    {
      /*
        Start and end coincide: emit one point instead of a two-point line.
      */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() parses an SVG-style path string ('d' attribute grammar) and
  appends the traced point primitives to mvg_info's primitive list.  Commands
  handled: M/m (move to), L/l (line to), H/h (horizontal line), V/v (vertical
  line), C/c and S/s (cubic Bezier, plain and shorthand), Q/q and T/t
  (quadratic Bezier, plain and shorthand), A/a (elliptical arc), Z/z (close
  path).  Upper-case commands use absolute coordinates, lower-case relative.
  Returns the total number of coordinates traced, or 0 on failure (bad token,
  allocation failure, or a failed sub-trace).
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /*
    subpath_offset marks where the current subpath begins in the primitive
    list; its coordinate count is patched once the subpath ends.
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    /*
      ThrowPointExpectedException sets status; bail out on the next pass.
    */
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx ry x-axis-rotation large-arc-flag sweep-flag x y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /*
            Lower-case command: endpoint is relative to the current point.
          */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          /*
            The primitive buffer may have been reallocated; re-fetch q.
          */
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bezier curve: two control points then the endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to: only an x coordinate is consumed.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: terminates the current subpath (patching its coordinate
          count) and starts a new one.  Extra coordinate pairs after the
          first are implicit line-to commands.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remembered for the Z (close path) command */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bezier curve: one control point then the endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Shorthand cubic Bezier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /*
            No preceding cubic command: the reflection degenerates to the
            current point, per the SVG shorthand rule.
          */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Shorthand quadratic Bezier curve: the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to: only a y coordinate is consumed.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath start, mark the subpath
          closed, and begin a new subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /*
          Unknown path command: record the error (sets status).
        */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /*
    Patch the coordinate count of the final (possibly unclosed) subpath.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /*
    Walk backwards stamping the primitive type; multiple closed subpaths
    (z_count > 1) switch to fill-to-border so holes render correctly.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
/*
  TraceRectangle() traces the closed outline of an axis-aligned rectangle:
  the four corners followed by a repeat of the starting corner, each traced
  as a point primitive and stamped with the caller's primitive type.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    vertex[5];

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Corner order: start, (start.x,end.y), end, (end.x,start.y), start again
    to close the outline.
  */
  vertex[0]=start;
  vertex[1].x=start.x;
  vertex[1].y=end.y;
  vertex[2]=end;
  vertex[3].x=end.x;
  vertex[3].y=start.y;
  vertex[4]=start;
  p=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(p,vertex[i]) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /*
    Propagate the primitive type backwards over the traced points.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with elliptically rounded corners
  as four quarter-ellipse arcs (clockwise from the top-right corner) plus a
  closing point back to the first traced point.  The corner radii are
  clamped to half the rectangle's extent; a degenerate rectangle traces
  nothing and succeeds.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  /*
    Angular sweep of each quarter ellipse, clockwise from top-right.
  */
  static const double
    sweep[4][2] =
    {
      { 270.0, 360.0 },
      {   0.0,  90.0 },
      {  90.0, 180.0 },
      { 180.0, 270.0 }
    };

  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  for (i=0; i < 4; i++)
  {
    /*
      Corners 0-1 sit on the right edge, 2-3 on the left; corners 0 and 3
      on the top edge, 1 and 2 on the bottom.
    */
    point.x=(i < 2) ? start.x+segment.x-arc.x : start.x+arc.x;
    point.y=((i == 0) || (i == 3)) ? start.y+arc.y : start.y+segment.y-arc.y;
    degrees.x=sweep[i][0];
    degrees.y=sweep[i][1];
    if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
      return(MagickFalse);
    /*
      TraceEllipse may reallocate the primitive buffer; re-fetch p.
    */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
  }
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both ends of an open stroked path outward by
  'offset' along the local path direction, producing square line caps.  The
  direction at each end is taken from the nearest vertex that is at least
  MagickEpsilon away in x or y.

  Fix: the end-point displacement is now skipped when no usable direction
  exists (zero-length path, e.g. number_vertices <= 1 or all vertices
  coincident).  Previously the code divided by a zero 'distance', injecting
  NaN/Inf coordinates, and when the backward scan found no distinct vertex
  it could read primitive_info[-1] (j underruns to -1).
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Scan forward for the first vertex measurably distinct from vertex 0.
  */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      /*
        Push the first vertex outward along the (dx,dy) direction.
      */
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /*
    Scan backward for the last vertex measurably distinct from the final one.
  */
  dx=0.0;
  dy=0.0;
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  if ((j >= 0) && (distance >= MagickEpsilon))
    {
      /*
        Push the last vertex outward along the (dx,dy) direction; the j >= 0
        guard prevents the former primitive_info[-1] access.
      */
      primitive_info[number_vertices-1].point.x=(double) (
        primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double) (
        primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
  return(MagickTrue);
}
/*
  TraceStrokePolygon() computes the closed polygon that outlines the stroke
  of the given polyline 'primitive_info', honoring the draw_info stroke
  width, line cap, line join, and miter limit.  Two point buffers are built
  in parallel -- path_p (one side of the stroke) and path_q (the other) --
  then concatenated (path_q reversed) into the returned stroke polygon.
  Returns a newly allocated PrimitiveInfo list (caller frees), or NULL on
  allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad) \
if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
  { \
    if (~max_strokes < (pad)) \
      { \
        path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
      } \
    else \
      { \
        max_strokes+=(pad); \
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes+ \
          MaxStrokePad,sizeof(*path_p)); \
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes+ \
          MaxStrokePad,sizeof(*path_q)); \
      } \
    if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
      { \
        if (path_p != (PointInfo *) NULL) \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        if (path_q != (PointInfo *) NULL) \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        polygon_primitive=(PrimitiveInfo *) \
          RelinquishMagickMemory(polygon_primitive); \
        return((PrimitiveInfo *) NULL); \
      } \
  }

  /*
    A line segment's slope and inverse slope for each side (p = previous
    segment, q = current segment).
  */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /*
        Closed path with a joining cap: repeat the second vertex so the
        closing joint is rendered like any interior joint.
      */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes+MaxStrokePad,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes+MaxStrokePad,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /*
    Near-vertical and near-horizontal segments get a huge finite slope
    (+-1/MagickEpsilon) instead of dividing by ~0.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /*
    mid is half the stroke width in device space; miterlimit is the squared
    maximum joint extent before a miter is beveled.
  */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /*
    Offset the first segment perpendicular to its direction to seed the two
    sides (box_p/box_q) of the stroke.
  */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip segments shorter than half a pixel */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    /*
      Offset this segment perpendicular to its direction; box_p[2..3] and
      box_q[2..3] are its two offset endpoints on each side.
    */
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /*
      box_p[4]/box_q[4]: intersection of the previous and current offset
      edges (the miter point); parallel segments reuse the shared endpoint.
    */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /*
      The cross product's sign selects which side of the joint is convex;
      the two switches below are mirror images of each other.
    */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter exceeds the limit: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /*
            Approximate the round joint with an arc of small segments
            about the shared vertex.
          */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter exceeds the limit: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          /*
            Approximate the round joint with an arc of small segments
            about the shared vertex.
          */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /*
      Advance: the current segment becomes the previous one.
    */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: path_p forward, then path_q reversed, with the
    subpath start repeated when the path is closed.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
GB_reduce_to_scalar_template.c | //------------------------------------------------------------------------------
// GB_reduce_to_scalar_template: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar, with typecasting and generic operators.
// No panel is used.
{

  // NOTE(review): this is a template body, presumably #include'd by a file
  // that defines GB_ATYPE and the GB_* macros (cast/add/partition/terminal
  // checks) plus A, s, W, nthreads, ntasks -- confirm against the including
  // translation unit.

  //--------------------------------------------------------------------------
  // get A
  //--------------------------------------------------------------------------

  const GB_ATYPE *restrict Ax = A->x ;
  int64_t anz = GB_NNZ (A) ;
  ASSERT (anz > 0) ;

  //--------------------------------------------------------------------------
  // reduce A to a scalar
  //--------------------------------------------------------------------------

  if (nthreads == 1)
  {

    //----------------------------------------------------------------------
    // single thread
    //----------------------------------------------------------------------

    // s = (ztype) Ax [0]
    GB_CAST_ARRAY_TO_SCALAR (s, Ax, 0) ;
    for (int64_t p = 1 ; p < anz ; p++)
    {
      // check for early exit: stop once s reaches the monoid terminal value
      GB_BREAK_IF_TERMINAL (s) ;
      // s = op (s, (ztype) Ax [p])
      GB_ADD_CAST_ARRAY_TO_SCALAR (s, Ax, p) ;
    }
  }
  else
  {

    //----------------------------------------------------------------------
    // each thread reduces its own slice in parallel
    //----------------------------------------------------------------------

    bool early_exit = false ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
      // each task reduces the contiguous range Ax [pstart..pend-1]
      int64_t pstart, pend ;
      GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
      // ztype t = (ztype) Ax [pstart], with typecast
      GB_SCALAR (t) ;
      GB_CAST_ARRAY_TO_SCALAR (t, Ax, pstart) ;
      GB_IF_NOT_EARLY_EXIT
      {
        for (int64_t p = pstart+1 ; p < pend ; p++)
        {
          // check for early exit
          GB_PARALLEL_BREAK_IF_TERMINAL (t) ;
          // t = op (t, (ztype) Ax [p]), with typecast
          GB_ADD_CAST_ARRAY_TO_SCALAR (t, Ax, p) ;
        }
      }
      // W [tid] = t, no typecast
      GB_COPY_SCALAR_TO_ARRAY (W, tid, t) ;
    }

    //----------------------------------------------------------------------
    // sum up the results of each slice using a single thread
    //----------------------------------------------------------------------

    // s = W [0], no typecast
    GB_COPY_ARRAY_TO_SCALAR (s, W, 0) ;
    for (int tid = 1 ; tid < ntasks ; tid++)
    {
      // s = op (s, W [tid]), no typecast
      GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
    }
  }
}
|
spbsv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpbsv.c, normal z -> s, Fri Sep 28 17:38:08 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_pbsv
*
* Computes the solution to a system of linear equations A * X = B,
* where A is an n-by-n symmetric positive definite band matrix, and X and B
* are n-by-nrhs matrices. The Cholesky decomposition is used to factor A as
*
* \f[ A = L\times L^T, \f] if uplo = PlasmaLower,
* or
* \f[ A = U^T\times U, \f] if uplo = PlasmaUpper,
*
* where U is an upper triangular matrix and L is a lower triangular matrix.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] kd
* The number of subdiagonals within the band of A if uplo = PlasmaLower,
* or the number of superdiagonals if uplo = PlasmaUpper. kd >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns
* of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* On entry, the upper or lower triangle of the symmetric band
* matrix A, stored in the first KD+1 rows of the array. The
* j-th column of A is stored in the j-th column of the array AB
* as follows:
* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j;
* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j <= i <= min(n,j+kd).
* \n
* On exit, if INFO = 0, the triangular factor U or L from the
* Cholesky factorization A = U^T*U or A = L*L^T of the band
* matrix A, in the same storage format as A.
*
* @param[in] ldab
* The leading dimension of the array AB. ldab >= kd+1.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
*******************************************************************************
*
* @sa plasma_omp_spbsv
* @sa plasma_cpbsv
* @sa plasma_dpbsv
* @sa plasma_spbsv
*
******************************************************************************/
int plasma_spbsv(plasma_enum_t uplo,
                 int n, int kd, int nrhs,
                 float *pAB, int ldab,
                 float *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments; negative return value -i flags argument i.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (ldab < kd+1) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_pbtrf(plasma, PlasmaRealFloat, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    // lm: storage height of the band in tiles — one tile row for the
    // diagonal plus enough whole tiles to hold kd off-diagonals.
    int lm = nb*(1+(kd+nb-1)/nb);
    plasma_desc_t AB;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaRealFloat, uplo, nb, nb,
                                             lm, n, 0, 0, n, n, kd, kd, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        ldb, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize sequence.
    // Fix: the return value was previously ignored, so a failed init let
    // execution continue with an uninitialized sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request (same fix as for the sequence above).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_spb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_spbsv(uplo, AB, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2pb(AB, pAB, ldab, &sequence, &request);
        plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);

    // Return status accumulated by the async calls.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_pbsv
*
* Solves a symmetric positive definite band system of linear equations
* using Cholesky factorization.
* Non-blocking tile version of plasma_spbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[in,out] B
* Descriptor of right-hand-sides B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_spbsv
* @sa plasma_omp_cpbsv
* @sa plasma_omp_dpbsv
* @sa plasma_omp_spbsv
*
******************************************************************************/
void plasma_omp_spbsv(plasma_enum_t uplo, plasma_desc_t AB, plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check sequence and request first: they are needed to report every
    // subsequent error back to the caller.  (Fix: these checks previously
    // came after code that already passed them to plasma_request_fail().)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        // Fix: report the failure through sequence/request like every other
        // argument check; previously this error was silently dropped for
        // asynchronous callers.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (AB.n == 0 || B.n == 0)
        return;

    // Cholesky factorization A = L*L^T (lower) or U^T*U (upper) ...
    plasma_pspbtrf(uplo, AB, sequence, request);

    // ... followed by the two triangular band solves.
    plasma_pstbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);

    plasma_pstbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);
}
|
target_teams_distribute_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute foo
// Verify diagnostics when '#pragma omp target teams distribute' is not
// followed by a for loop.  (The expected-* comments are -verify directives
// and must stay adjacent to the lines they reference.)
void test_no_clause() {
int i;
#pragma omp target teams distribute
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target teams distribute' must be a for loop}}
#pragma omp target teams distribute
++i;
}
// Verify that branches (goto/return) may not cross the boundary of the
// OpenMP region: labels inside/outside the region are mutually invisible.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target teams distribute
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Verify that unrecognized tokens after the directive are ignored with a
// warning rather than an error.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute foo bar
for (i = 0; i < 16; ++i)
;
}
// Verify handling of stray punctuation (';', ',') after the directive or
// after a valid clause: extra tokens are warned about and ignored.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// Verify parsing and semantic checks of the 'collapse' clause: malformed
// argument lists, non-constant / non-positive arguments, mismatched loop
// nest depth, and interaction with firstprivate on the iteration variable.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
#pragma omp target teams distribute collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target teams distribute collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{loop iteration variable in the associated loop of 'omp target teams distribute' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target teams distribute collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Verify parsing of the 'private' clause: malformed argument lists,
// non-variable arguments, and well-formed single/multiple variable lists.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Verify parsing of the 'lastprivate' clause (same malformed-list cases as
// 'private', plus valid variable lists).
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Verify parsing of the 'firstprivate' clause, and that combining
// lastprivate with firstprivate on the same variables is accepted.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Verify that the loop iteration variable must have integer or pointer
// type (float/double loop variables are rejected).
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
CB_PackingScalarNode.h | #ifndef _CB_PACK_S_NODE_H_
#define _CB_PACK_S_NODE_H_
/*
###################################################################################
#
# CBrick
#
# Copyright (c) 2017-2020 Research Institute for Information Technology(RIIT),
# Kyushu University. All rights reserved.
#
####################################################################################
*/
/*
* @file CB_PackingScalarNode.h
* @brief BrickComm class
*/
// #########################################################
/*
* @brief pack send data for I direction
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendm send buffer of I- direction
* @param [out] sendp send buffer of I+ direction
* @param [in] nIDm Rank number of I- direction
* @param [in] nIDp Rank number of I+ direction
*/
// Pack the I-direction faces of a node-centered scalar array into the
// send buffers for the I- (sendm) and I+ (sendp) neighbour ranks.
// A negative rank number means "no neighbour"; that side is skipped.
template <class T>
void BrickComm::pack_SXnode(const T *array,
const int gc,
T *sendm,
T *sendp,
const int nIDm,
const int nIDp)
{
int NI = size[0];
int NJ = size[1];
int NK = size[2];
int VC = halo_width;
/*
Node-centered overlap between neighbouring ranks along i:
<--gc-->
rankA [NI-3] [NI-2] [NI-1] [NI] [NI+1]
-----+------+------|------+------+-------> i
rankB [-2] [-1] [0] [1] [2]
<--gc-->
*/
// Send this rank's own interior data to the minus-side rank
// (source layers i+1 .. gc, per the diagram above).
if( nIDm >= 0 )
{
#pragma omp parallel for collapse(2)
for( int k=0; k<NK; k++ ){
for( int j=0; j<NJ; j++ ){
#pragma novector
for( int i=0; i<gc; i++ ){
sendm[_IDX_SI(i,j,k,NJ,gc)] = array[_IDX_S3D(i+1,j,k,NI,NJ,VC)];
}
}
}
}
// Send this rank's own interior data to the plus-side rank
// (source layers NI-2 .. NI-3+gc).
if( nIDp >= 0 )
{
#pragma omp parallel for collapse(2)
for( int k=0; k<NK; k++ ){
for( int j=0; j<NJ; j++ ){
#pragma novector
for( int i=0; i<gc; i++ ){
sendp[_IDX_SI(i,j,k,NJ,gc)] = array[_IDX_S3D(NI-2+i,j,k,NI,NJ,VC)];
}
}
}
}
}
// #########################################################
/*
* @brief unpack send data for I direction
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvm recv buffer of I- direction
* @param [in] recvp recv buffer of I+ direction
* @param [in] nIDm Rank number of I- direction
* @param [in] nIDp Rank number of I+ direction
*/
// Unpack the I-direction receive buffers into this rank's guide (halo)
// cells: recvm fills the I- side, recvp fills the I+ side.
// A negative rank number means "no neighbour"; that side is skipped.
template <class T>
void BrickComm::unpack_SXnode(T *array,
const int gc,
const T *recvm,
const T *recvp,
const int nIDm,
const int nIDp)
{
int NI = size[0];
int NJ = size[1];
int NK = size[2];
int VC = halo_width;
/*
Node-centered overlap between neighbouring ranks along i:
<--gc-->
rankA [NI-3] [NI-2] [NI-1] [NI] [NI+1]
-----+------+------|------+------+-------> i
rankB [-2] [-1] [0] [1] [2]
<--gc-->
*/
// Copy data received from the minus side into this domain's guide cells
// (destination layers i-1, i.e. -1 .. gc-2, per the diagram above).
if( nIDm >= 0 )
{
#pragma omp parallel for collapse(2)
for( int k=0; k<NK; k++ ){
for( int j=0; j<NJ; j++ ){
#pragma novector
for( int i=0; i<gc; i++ ){
array[_IDX_S3D(i-1,j,k,NI,NJ,VC)] = recvm[_IDX_SI(i,j,k,NJ,gc)];
}
}
}
}
// Copy data received from the plus side into this domain's guide cells
// (destination layers NI .. NI+gc-1).
if( nIDp >= 0 )
{
#pragma omp parallel for collapse(2)
for( int k=0; k<NK; k++ ){
for( int j=0; j<NJ; j++ ){
#pragma novector
for( int i=0; i<gc; i++ ){
array[_IDX_S3D(NI+i,j,k,NI,NJ,VC)] = recvp[_IDX_SI(i,j,k,NJ,gc)];
}
}
}
}
}
// #########################################################
/*
* @brief pack send data for J direction
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendm send buffer of J- direction
* @param [out] sendp send buffer of J+ direction
* @param [in] nIDm Rank number of J- direction
* @param [in] nIDp Rank number of J+ direction
*/
// Pack the J-direction faces of a node-centered scalar array into the
// send buffers for the J- (sendm) and J+ (sendp) neighbour ranks.
// A negative rank number means "no neighbour"; that side is skipped.
template <class T>
void BrickComm::pack_SYnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  const int ni = size[0];
  const int nj = size[1];
  const int nk = size[2];
  const int vc = halo_width;

  // J- face: source layers jj+1 (jj = 0 .. gc-1).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<nk; kk++ )
    {
      for( int jj=0; jj<gc; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          sendm[_IDX_SJ(ii,jj,kk,ni,gc)] = array[_IDX_S3D(ii,jj+1,kk,ni,nj,vc)];
        }
      }
    }
  }

  // J+ face: source layers nj-2+jj (jj = 0 .. gc-1).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<nk; kk++ )
    {
      for( int jj=0; jj<gc; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          sendp[_IDX_SJ(ii,jj,kk,ni,gc)] = array[_IDX_S3D(ii,nj-2+jj,kk,ni,nj,vc)];
        }
      }
    }
  }
}
// #########################################################
/*
* @brief unpack send data for J direction
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvm recv buffer of J- direction
* @param [in] recvp recv buffer of J+ direction
* @param [in] nIDm Rank number of J- direction
* @param [in] nIDp Rank number of J+ direction
*/
// Unpack the J-direction receive buffers into this rank's guide (halo)
// cells: recvm fills the J- side, recvp fills the J+ side.
// A negative rank number means "no neighbour"; that side is skipped.
template <class T>
void BrickComm::unpack_SYnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  const int ni = size[0];
  const int nj = size[1];
  const int nk = size[2];
  const int vc = halo_width;

  // J- side: destination layers jj-1 (jj = 0 .. gc-1).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<nk; kk++ )
    {
      for( int jj=0; jj<gc; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          array[_IDX_S3D(ii,jj-1,kk,ni,nj,vc)] = recvm[_IDX_SJ(ii,jj,kk,ni,gc)];
        }
      }
    }
  }

  // J+ side: destination layers nj+jj (jj = 0 .. gc-1).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<nk; kk++ )
    {
      for( int jj=0; jj<gc; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          array[_IDX_S3D(ii,nj+jj,kk,ni,nj,vc)] = recvp[_IDX_SJ(ii,jj,kk,ni,gc)];
        }
      }
    }
  }
}
// #########################################################
/*
* @brief pack send data for K direction
* @param [in] array source array
* @param [in] gc number of guide cell layer actually to be sent
* @param [out] sendm send buffer of K- direction
* @param [out] sendp send buffer of K+ direction
* @param [in] nIDm Rank number of K- direction
* @param [in] nIDp Rank number of K+ direction
*/
// Pack the K-direction faces of a node-centered scalar array into the
// send buffers for the K- (sendm) and K+ (sendp) neighbour ranks.
// A negative rank number means "no neighbour"; that side is skipped.
template <class T>
void BrickComm::pack_SZnode(const T *array,
                            const int gc,
                            T *sendm,
                            T *sendp,
                            const int nIDm,
                            const int nIDp)
{
  const int ni = size[0];
  const int nj = size[1];
  const int nk = size[2];
  const int vc = halo_width;

  // K- face: source layers kk+1 (kk = 0 .. gc-1).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<gc; kk++ )
    {
      for( int jj=0; jj<nj; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          sendm[_IDX_SK(ii,jj,kk,ni,nj)] = array[_IDX_S3D(ii,jj,kk+1,ni,nj,vc)];
        }
      }
    }
  }

  // K+ face: source layers nk-2+kk (kk = 0 .. gc-1).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<gc; kk++ )
    {
      for( int jj=0; jj<nj; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          sendp[_IDX_SK(ii,jj,kk,ni,nj)] = array[_IDX_S3D(ii,jj,nk-2+kk,ni,nj,vc)];
        }
      }
    }
  }
}
// #########################################################
/*
* @brief unpack send data for K direction
* @param [in,out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvm recv buffer of K- direction
* @param [in] recvp recv buffer of K+ direction
* @param [in] nIDm Rank number of K- direction
* @param [in] nIDp Rank number of K+ direction
*/
// Unpack the K-direction receive buffers into this rank's guide (halo)
// cells: recvm fills the K- side, recvp fills the K+ side.
// A negative rank number means "no neighbour"; that side is skipped.
template <class T>
void BrickComm::unpack_SZnode(T *array,
                              const int gc,
                              const T *recvm,
                              const T *recvp,
                              const int nIDm,
                              const int nIDp)
{
  const int ni = size[0];
  const int nj = size[1];
  const int nk = size[2];
  const int vc = halo_width;

  // K- side: destination layers kk-1 (kk = 0 .. gc-1).
  if( nIDm >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<gc; kk++ )
    {
      for( int jj=0; jj<nj; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          array[_IDX_S3D(ii,jj,kk-1,ni,nj,vc)] = recvm[_IDX_SK(ii,jj,kk,ni,nj)];
        }
      }
    }
  }

  // K+ side: destination layers nk+kk (kk = 0 .. gc-1).
  if( nIDp >= 0 )
  {
#pragma omp parallel for collapse(2)
    for( int kk=0; kk<gc; kk++ )
    {
      for( int jj=0; jj<nj; jj++ )
      {
#pragma novector
        for( int ii=0; ii<ni; ii++ )
        {
          array[_IDX_S3D(ii,jj,nk+kk,ni,nj,vc)] = recvp[_IDX_SK(ii,jj,kk,ni,nj)];
        }
      }
    }
  }
}
#ifdef _DIAGONAL_COMM
// #########################################################
/*
* @brief pack send data for diagonal edge
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendbuf send buffer
* @param [out] recvbuf recv buffer
* @param [out] req Array of MPI request
* @retval true-success, false-fail
*/
// Pack the 12 diagonal-edge regions of a node-centered array and start the
// non-blocking edge exchange.  For each edge direction with a valid
// neighbour (comm_tbl[dir] >= 0): post a receive, pack the edge strip into
// sendbuf, post a send, and advance the shared buffer offset 'ptr'.
// sendbuf/recvbuf are partitioned sequentially in direction order; req
// holds two requests per direction (recv at dir*2, send at dir*2+1).
// Returns false as soon as any IrecvData/IsendData call fails.
template <class T>
bool BrickComm::pack_SEnode(T *array,
const int gc,
T *sendbuf,
T *recvbuf,
MPI_Request *req)
{
int NI = size[0];
int NJ = size[1];
int NK = size[2];
int VC = halo_width;
int tag = 0; // unused: only the IrecvData/IsendData wrappers are called below
size_t ptr = 0;
//// X edge //// strips running along i: (NI-1) x gc x gc nodes each
for( int dir=int(E_mYmZ);dir<=int(E_pYpZ);dir++ )
{
if( comm_tbl[dir] >= 0 )
{
T *sendptr = &sendbuf[ptr];
T *recvptr = &recvbuf[ptr];
size_t sz = (NI-1) * gc * gc;
// post receive (wrapper around the raw MPI_Irecv previously inlined here)
if ( !IrecvData(recvptr,
sz,
comm_tbl[dir],
&req[dir*2]) ) return false;
// pack: select the (j,k) corner strip matching the edge direction
switch(dir)
{
case int(E_mYmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=1; i<NI; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-1,NI-1,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_pYmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=1; i<NI; i++ ){
sendptr[_IDX_S3D(i-1,j-(NJ-gc),k-1,NI-1,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_mYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=1; i<NI; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-(NK-gc),NI-1,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_pYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=1; i<NI; i++ ){
sendptr[_IDX_S3D(i-1,j-(NJ-gc),k-(NK-gc),NI-1,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
}
// post send (wrapper around the raw MPI_Isend previously inlined here)
if ( !IsendData(sendptr,
sz,
comm_tbl[dir],
&req[dir*2+1]) ) return false;
// advance to this direction's end of the shared buffers
ptr += sz;
}
}
//// Y edge //// strips running along j: gc x (NJ-1) x gc nodes each
for( int dir=int(E_mXmZ);dir<=int(E_pXpZ);dir++ )
{
if( comm_tbl[dir] >= 0 )
{
T *sendptr = &sendbuf[ptr];
T *recvptr = &recvbuf[ptr];
size_t sz = gc * (NJ-1) * gc;
// post receive
if ( !IrecvData(recvptr,
sz,
comm_tbl[dir],
&req[dir*2]) ) return false;
// pack: select the (i,k) corner strip matching the edge direction
switch(dir)
{
case int(E_mXmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-1,gc,NJ-1,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_pXmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-1,k-1,gc,NJ-1,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_mXpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-(NK-gc),gc,NJ-1,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_pXpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-1,k-(NK-gc),gc,NJ-1,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
}
// post send
if ( !IsendData(sendptr,
sz,
comm_tbl[dir],
&req[dir*2+1]) ) return false;
// advance buffer offset
ptr += sz;
}
}
//// Z edge //// strips running along k: gc x gc x (NK-1) nodes each
for( int dir=int(E_mXmY);dir<=int(E_pXpY);dir++ )
{
if( comm_tbl[dir] >= 0 )
{
T *sendptr = &sendbuf[ptr];
T *recvptr = &recvbuf[ptr];
size_t sz = gc * gc * (NK-1);
// post receive
if ( !IrecvData(recvptr,
sz,
comm_tbl[dir],
&req[dir*2]) ) return false;
// pack: select the (i,j) corner strip matching the edge direction
switch(dir)
{
case int(E_mXmY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_pXmY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-1,k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_mXpY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-(NJ-gc),k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(E_pXpY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-(NJ-gc),k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
}
// post send
if ( !IsendData(sendptr,
sz,
comm_tbl[dir],
&req[dir*2+1]) ) return false;
// advance buffer offset
ptr += sz;
}
}
return true;
}
// #########################################################
/*
* @brief unpack send data for diagonal edge
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvbuf recv buffer
*/
template <class T>
void BrickComm::unpack_SEnode(T *array,
const int gc,
const T *recvbuf)
{
// Node counts of the destination array and its halo width.
int NI = size[0];
int NJ = size[1];
int NK = size[2];
int VC = halo_width;
// Running read offset into recvbuf; it must advance through the edge
// directions in the same order the matching pack routine wrote them.
size_t ptr = 0;
//// X edge ////
// Edges parallel to X: iterate the four (Y,Z) diagonal directions.
for( int dir=int(E_mYmZ);dir<=int(E_pYpZ);dir++ )
{
// comm_tbl[dir] < 0 means no neighbor in that direction: skip.
if( comm_tbl[dir] >= 0 )
{
const T *recvptr = &recvbuf[ptr];
// (NI-1) interior nodes along X times a gc x gc halo cross-section.
size_t sz = (NI-1) * gc * gc;
// unpack
// Each case copies the received gc-thick edge strip into the ghost
// region of `array`; the subtraction in the recvptr index maps the
// global loop range back to buffer-local 0-based coordinates.
switch(dir)
{
case int(E_mYmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=1; i<NI; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-1,j-(1-gc),k-(1-gc),NI-1,gc,0)];
}
}
}
break;
case int(E_pYmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=1; i<NI; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-1,j-(NJ),k-(1-gc),NI-1,gc,0)];
}
}
}
break;
case int(E_mYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=1; i<NI; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-1,j-(1-gc),k-(NK),NI-1,gc,0)];
}
}
}
break;
case int(E_pYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=1; i<NI; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-1,j-(NJ),k-(NK),NI-1,gc,0)];
}
}
}
break;
}
ptr += sz;
}
}
//// Y edge ////
// Edges parallel to Y: iterate the four (X,Z) diagonal directions.
for( int dir=int(E_mXmZ);dir<=int(E_pXpZ);dir++ )
{
if( comm_tbl[dir] >= 0 )
{
const T *recvptr = &recvbuf[ptr];
// gc x (NJ-1) interior nodes along Y x gc.
size_t sz = gc * (NJ-1) * gc;
// unpack
switch(dir)
{
case int(E_mXmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-1,k-(1-gc),gc,NJ-1,0)];
}
}
}
break;
case int(E_pXmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-1,k-(1-gc),gc,NJ-1,0)];
}
}
}
break;
case int(E_mXpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-1,k-(NK),gc,NJ-1,0)];
}
}
}
break;
case int(E_pXpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=1; j<NJ; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-1,k-(NK),gc,NJ-1,0)];
}
}
}
break;
}
ptr += sz;
}
}
//// Z edge ////
// Edges parallel to Z: iterate the four (X,Y) diagonal directions.
for( int dir=int(E_mXmY);dir<=int(E_pXpY);dir++ )
{
if( comm_tbl[dir] >= 0 )
{
const T *recvptr = &recvbuf[ptr];
// gc x gc x (NK-1) interior nodes along Z.
size_t sz = gc * gc * (NK-1);
// unpack
switch(dir)
{
case int(E_mXmY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-(1-gc),k-1,gc,gc,0)];
}
}
}
break;
case int(E_pXmY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-(1-gc),k-1,gc,gc,0)];
}
}
}
break;
case int(E_mXpY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-(NJ),k-1,gc,gc,0)];
}
}
}
break;
case int(E_pXpY):
#pragma omp parallel for collapse(3)
for( int k=1; k<NK; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-(NJ),k-1,gc,gc,0)];
}
}
}
break;
}
ptr += sz;
}
}
}
// #########################################################
/*
* @brief pack send data for diagonal corner
* @param [in] array source array
* @param [in] gc number of guide cell layer to be sent
* @param [out] sendbuf send buffer
* @param [out] recvbuf recv buffer
* @param [out] req Array of MPI request
* @retval true-success, false-fail
*/
template <class T>
bool BrickComm::pack_SCnode(T *array,
const int gc,
T *sendbuf,
T *recvbuf,
MPI_Request *req)
{
// Node counts of the source array and its halo width.
int NI = size[0];
int NJ = size[1];
int NK = size[2];
int VC = halo_width;
int tag = 0; // kept for the commented-out raw MPI path below
// Running offset shared by sendbuf and recvbuf; each corner packet
// occupies the same slot in both buffers.
size_t ptr = 0;
//// 8 corner ////
for( int dir=int(C_mXmYmZ);dir<=int(C_pXpYpZ);dir++ )
{
// comm_tbl[dir] < 0 means no neighbor in that direction: skip.
if( comm_tbl[dir] >= 0 )
{
T *sendptr = &sendbuf[ptr];
T *recvptr = &recvbuf[ptr];
// Each corner packet is a gc^3 cube of nodes.
size_t sz = gc * gc * gc;
/* recv
if ( MPI_SUCCESS != MPI_Irecv(recvptr,
sz,
dtype,
comm_tbl[dir],
tag,
MPI_COMM_WORLD,
&req[dir*2]) ) return false;
*/
// Post the non-blocking receive first, then pack and send.
if ( !IrecvData(recvptr,
sz,
comm_tbl[dir],
&req[dir*2]) ) return false;
// pack
// Copy the gc^3 interior corner region adjacent to direction `dir`
// into the send buffer; the index subtraction rebases the loop range
// to buffer-local 0-based coordinates.
switch(dir)
{
case int(C_mXmYmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_pXmYmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-1,k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_mXpYmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-(NJ-gc),k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_pXpYmZ):
#pragma omp parallel for collapse(3)
for( int k=1; k<=gc; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-(NJ-gc),k-1,gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_mXmYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-1,k-(NK-gc),gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_pXmYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=1; j<=gc; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-1,k-(NK-gc),gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_mXpYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=1; i<=gc; i++ ){
sendptr[_IDX_S3D(i-1,j-(NJ-gc),k-(NK-gc),gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
case int(C_pXpYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK-gc; k<NK; k++ ){
for( int j=NJ-gc; j<NJ; j++ ){
for( int i=NI-gc; i<NI; i++ ){
sendptr[_IDX_S3D(i-(NI-gc),j-(NJ-gc),k-(NK-gc),gc,gc,0)] = array[_IDX_S3D(i,j,k,NI,NJ,VC)];
}
}
}
break;
}
/* send
if ( MPI_SUCCESS != MPI_Isend(sendptr,
sz,
dtype,
comm_tbl[dir],
tag,
MPI_COMM_WORLD,
&req[dir*2+1]) ) return false;
*/
if ( !IsendData(sendptr,
sz,
comm_tbl[dir],
&req[dir*2+1]) ) return false;
// pointer
ptr += sz;
}
}
return true;
}
// #########################################################
/*
* @brief unpack send data for diagonal corner
* @param [out] array dest array
* @param [in] gc number of guide cell layer to be sent
* @param [in] recvbuf recv buffer
*/
template <class T>
void BrickComm::unpack_SCnode(T *array,
const int gc,
const T *recvbuf)
{
// Node counts of the destination array and its halo width.
int NI = size[0];
int NJ = size[1];
int NK = size[2];
int VC = halo_width;
// Running read offset into recvbuf; direction order must match the
// pack_SCnode layout.
size_t ptr = 0;
//// 8 corner ////
for( int dir=int(C_mXmYmZ);dir<=int(C_pXpYpZ);dir++ )
{
// comm_tbl[dir] < 0 means no neighbor in that direction: skip.
if( comm_tbl[dir] >= 0 )
{
const T *recvptr = &recvbuf[ptr];
// Each corner packet is a gc^3 cube of nodes.
size_t sz = gc * gc * gc;
// unpack
// Copy the received gc^3 cube into the ghost corner region adjacent
// to direction `dir`.
switch(dir)
{
case int(C_mXmYmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-(1-gc),k-(1-gc),gc,gc,0)];
}
}
}
break;
case int(C_pXmYmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-(1-gc),k-(1-gc),gc,gc,0)];
}
}
}
break;
case int(C_mXpYmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-(NJ),k-(1-gc),gc,gc,0)];
}
}
}
break;
case int(C_pXpYmZ):
#pragma omp parallel for collapse(3)
for( int k=1-gc; k<=0; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-(NJ),k-(1-gc),gc,gc,0)];
}
}
}
break;
case int(C_mXmYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-(1-gc),k-(NK),gc,gc,0)];
}
}
}
break;
case int(C_pXmYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=1-gc; j<=0; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-(1-gc),k-(NK),gc,gc,0)];
}
}
}
break;
case int(C_mXpYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=1-gc; i<=0; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(1-gc),j-(NJ),k-(NK),gc,gc,0)];
}
}
}
break;
case int(C_pXpYpZ):
#pragma omp parallel for collapse(3)
for( int k=NK; k<NK+gc; k++ ){
for( int j=NJ; j<NJ+gc; j++ ){
for( int i=NI; i<NI+gc; i++ ){
array[_IDX_S3D(i,j,k,NI,NJ,VC)] = recvptr[_IDX_S3D(i-(NI),j-(NJ),k-(NK),gc,gc,0)];
}
}
}
break;
}
ptr += sz;
}
}
}
#endif // _DIAGONAL_COMM
#endif // _CB_PACK_S_NODE_H_
|
Image.h | #pragma once
#include<vector>
#include<fstream>
#include<iostream>
#include<functional>
#include<cmath>
#include<random>
#include "Vec3.h"
#include "PNG.h"
using namespace std;
// Returns the squared chord distance between two points on the unit sphere,
// given their (x, y) coordinates; z is reconstructed from unit length.
float projectedDistance(float x1, float y1, float x2, float y2) {
    float z_product = sqrt((1 - x1 * x1 - y1 * y1) * (1 - x2 * x2 - y2 * y2));
    float dot = x1 * x2 + y1 * y2 + z_product;
    return 2 - 2 * dot;
}
// Convenience overload: forwards the x/y components of each vector to the
// scalar version.
float projectedDistance(Vec3f v1, Vec3f v2) {
    const float x1 = v1[0], y1 = v1[1];
    const float x2 = v2[0], y2 = v2[1];
    return projectedDistance(x1, y1, x2, y2);
}
// Returns the seed (Euclidean-)closest to q across all layers. The image
// centre {0.5, 0.5, 0} is both the initial candidate and the fallback when
// `seeds` is empty.
Vec3f nearestSeed(Vec3f q, const vector<vector<Vec3f>> &seeds) {
    Vec3f best = {0.5, 0.5, 0};
    if (seeds.empty() || seeds[0].empty())
        return best;
    for (const vector<Vec3f> &layer : seeds) {
        for (const Vec3f &candidate : layer) {
            if ((q - candidate).length() < (q - best).length())
                best = candidate;
        }
    }
    return best;
}
// TODO: better search, kd-tree or something
// Returns one seed per layer: the closest seed in that layer satisfying
// `test`, or the image centre {0.5, 0.5, 0} when no closer qualifying seed
// exists. Empty input yields an empty result.
vector<Vec3f> nearestSeeds(Vec3f q, const vector<vector<Vec3f>> &seeds, function<bool(Vec3f)> test) {
    vector<Vec3f> result;
    if (seeds.empty() || seeds[0].empty())
        return result;
    for (const vector<Vec3f> &layer : seeds) {
        Vec3f best = {0.5, 0.5, 0};
        for (const Vec3f &candidate : layer) {
            if (test(candidate) && (q - candidate).length() < (q - best).length())
                best = candidate;
        }
        result.push_back(best);
    }
    return result;
}
// Snaps q to the regular 1-D grid of depth d (spacing 0.5^(d+1)), clamping
// to the first/last grid line so the result stays inside (0, 1).
float grid1d(float q, int d) {
    const float step = pow(0.5, d + 1);
    if (q < step)
        return step;
    if (q > 1 - step)
        return 1 - step;
    return round(q / step) * step;
}
// Snaps both coordinates of q to the depth-d grid; z is always 0.
Vec3f gridSeed(Vec3f q, int d) {
    const float gx = grid1d(q[0], d);
    const float gy = grid1d(q[1], d);
    return {gx, gy, 0};
}
// Appends `depth` layers of jittered grid seeds to `seeds`. Layer d holds
// (2^(d+1)-1)^2 points: a regular grid with spacing 0.5^(d+1), each point
// randomly offset by up to half a cell in x and y (z stays 0).
// Non-deterministic: seeded from std::random_device on every call.
void genSeeds(int depth, vector<vector<Vec3f>> &seeds) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
for (int d=0; d<depth; d++) {
seeds.emplace_back();
for (int i=0; i<pow(2, d + 1) - 1; i++) {
for (int j=0; j<pow(2, d + 1) - 1; j++) {
// 0.75 + 0.5*rand centres each seed in its cell with +/- quarter-cell jitter.
seeds.back().push_back((float)pow(0.5, d + 1) * Vec3f({i + 0.75f + 0.5f * float(dis(gen)), j + 0.75f + 0.5f * float(dis(gen)), 0}));
}
}
}
}
// RGB float image (values in [0,1]) with normal-map based stylization
// utilities (lit-sphere shading and StyleBlit-like transfer).
class Image {
public:
int m_width, m_height;
vector<Vec3f> m_data; // Values between 0 and 1
// Empty 0x0 image.
Image() {
m_width = 0;
m_height = 0;
m_data = vector<Vec3f>();
}
// w x h image with every pixel initialized to black.
Image(int w, int h) {
m_width = w;
m_height = h;
m_data = vector<Vec3f>();
m_data.assign(w * h, {0.0, 0.0, 0.0});
}
// Writes the image to `fname` as an ASCII PPM (P3) file, clamping each
// channel to [0,1] before scaling to 0..255.
void write(const char *fname) {
int max_color = 255;
ofstream outfile;
outfile.open(fname, ios::out | ios::trunc );
outfile << "P3" << endl << m_width << " " << m_height << endl << max_color << endl;
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
Vec3f pxl = m_data.at(y * m_width + x);
int r = (int)(max_color * max(0.0f, min(1.0f, pxl[0])));
int g = (int)(max_color * max(0.0f, min(1.0f, pxl[1])));
int b = (int)(max_color * max(0.0f, min(1.0f, pxl[2])));
outfile << r << " " << g << " " << b << " ";
}
outfile << endl;
}
outfile.close();
}
// Fills the image with a vertical gradient from color1 (top row) to
// color2 (bottom row).
void fillBackground(Vec3f color1={0.0, 0.0, 1.0}, Vec3f color2={1.0, 1.0, 1.0}) {
// #pragma omp parallel for
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
float t = (float)y / (float)m_height;
m_data[y * m_width + x] = {(1 - t) * color1[0] + t * color2[0],
(1 - t) * color1[1] + t * color2[1],
(1 - t) * color1[2] + t * color2[2]};
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere in file fname
// Uses styleBlit's brute force algorithm
// `threshold` is the maximum projected-distance error accepted when
// growing a patch around the current pixel's lit-sphere match.
void styleBlit(const char *fname, float threshold = 0.05) {
// done[] marks pixels whose color has already been decided.
vector<bool> done = vector<bool>(m_width * m_height, false);
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
if (!done[y * m_width + x]) {
Vec3f normal = m_data.at(y * m_width + x);
if (normal[2] <= __FLT_EPSILON__) {// If there is no mesh there or only the ground (-1 or 0)
m_data[y * m_width + x] = {0, 0, 0};
done[y * m_width + x] = true;
}
else {
// Map the normal's (x, y) from [-1,1] to lit-sphere pixel coords.
float abs_norm = 0.5f + normal[0] / 2.0f;
float ord_norm = 0.5f + normal[1] / 2.0f;
int abs = (int)floor(abs_norm * pngr.width);
int ord = (int)floor(ord_norm * pngr.height);
// Greedily transfer a whole source patch: every not-yet-done
// target pixel whose normal is close enough to the corresponding
// source-pixel normal gets that source color.
for (int y_src = 0; y_src < pngr.height; y_src++) {
for (int x_src = 0; x_src < pngr.width; x_src++) {
int x_new = x + x_src - abs;
int y_new = y + y_src - ord;
if (0<=y_new && y_new<m_height &&
0<=x_new && x_new<m_width &&
!done[y_new * m_width + x_new] &&
m_data[y_new * m_width + x_new][2]>__FLT_EPSILON__) {
Vec3f new_normal = m_data[y_new * m_width + x_new];
Vec3f src_normal = {float(x_src) / float(pngr.width) * 2.0f - 1.0f, float(y_src) / float(pngr.height) * 2.0f - 1.0f, 0};
float error = projectedDistance(new_normal[0],
new_normal[1],
src_normal[0],
src_normal[1]);
if (error < threshold) {
png_byte *src_pxl = &pngr.row_pointers[y_src][x_src * 4]; // *4 because of the number of channels
m_data[y_new * m_width + x_new] = {float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f};
done[y_new * m_width + x_new] = true;
}
}
}
}
}
}
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere in file fname
// Uses styleBlit's seeded version
// Blends, per pixel, the source colors predicted from the nearest seed of
// each layer, weighted by a sigmoid of the matching error.
// NOTE(review): the inner "#pragma omp parallel for" sits inside an outer
// parallel loop; without enabled nesting it is presumably ignored — confirm.
void styleBlitTree(const char *fname, float threshold = 0.05, int depth=8, float ball_scale = 0.3) {
// Copy the guiding channels
vector<Vec3f> guides = this->m_data;
this->m_data.assign(m_width * m_height, {0, 0, 0});
// Accumulated blend weight per pixel.
vector<float> coefs = vector<float>();
coefs.assign(m_width * m_height, 0);
// Generate seeds
vector<vector<Vec3f>> seeds;
genSeeds(depth, seeds);
// Transfer style
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
Vec3f normal = guides[y * m_width + x];
if (normal[2] > __FLT_EPSILON__) {
Vec3f pos_01 = {float(x) / float(m_width), float(y) / float(m_height), 0};
// A seed qualifies only where the guide's z-channel equals 1.
function<bool(Vec3f)> test = [this, &guides](Vec3f s){return abs(guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))][2] - 1) < __FLT_EPSILON__;};
vector<Vec3f> ns = nearestSeeds(pos_01, seeds, test);
int depth = 0;
for (Vec3f s: ns) {
Vec3f target_seed_normal = guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))];
Vec3f src_seed_01 = (target_seed_normal + Vec3f(1.0f, 1.0f, 0)) / 2.0f;
// Predict the source position by offsetting from the seed's
// lit-sphere anchor, scaled by a per-depth shrinking ball.
Vec3f predicted_src_01 = src_seed_01 + (pos_01 - s) / (ball_scale * pow(0.9, depth));
Vec3f predicted_normal = {2.0f * float(predicted_src_01[0]) - 1.0f,
2.0f * float(predicted_src_01[1]) - 1.0f, 0};
float error = projectedDistance(normal, predicted_normal);
if (!isnan(error) && abs(target_seed_normal[2] - 1) < __FLT_EPSILON__) {
// Soft acceptance: sigmoid centred at `threshold`.
float coef = 1.0f / (1 + exp(200 * (error - threshold)));
Vec3i predicted_src = {int(predicted_src_01[0] * pngr.width), int(predicted_src_01[1] * pngr.height), 0};
png_byte *src_pxl = &pngr.row_pointers[predicted_src[1]][predicted_src[0] * 4]; // *4 because of the number of channels
this->m_data[y * m_width + x] += coef * (1 - coefs[y * m_width + x]) * Vec3f(float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f);
coefs[y * m_width + x] += coef * (1 - coefs[y * m_width + x]);
}
depth++;
}
// NOTE(review): if no layer contributed, coefs is 0 here and the
// division produces NaN — confirm intended.
m_data[y * m_width + x] /= coefs[y * m_width + x];
}
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere, but actually only store the coordinates of the source point to transfer in m_data
// Output should be passed through transferFrom afterwards
// Uses styleBlit's seeded version
// Takes the first seed layer whose predicted source point matches within
// `threshold`, and stores that source coordinate (in [0,1]) in m_data.
void styleBlitTreeCoords(float threshold = 0.05, int depth=8, float ball_scale = 0.3) {
// Copy the guiding channels
vector<Vec3f> guides = this->m_data;
this->m_data.assign(m_width * m_height, {0, 0, 0});
// vector<float> coefs = vector<float>();
// coefs.assign(m_width * m_height, 0);
// Generate seeds
vector<vector<Vec3f>> seeds;
genSeeds(depth, seeds);
// Transfer style
// PNG_handler pngr = PNG_handler();
// pngr.read_png_file(fname);
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
Vec3f normal = guides[y * m_width + x];
if (normal[2] > __FLT_EPSILON__) {
Vec3f pos_01 = {float(x) / float(m_width), float(y) / float(m_height), 0};
function<bool(Vec3f)> test = [this, &guides](Vec3f s){return abs(guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))][2] - 1) < __FLT_EPSILON__;};
vector<Vec3f> ns = nearestSeeds(pos_01, seeds, test);
for (Vec3f s: ns) {
Vec3f target_seed_normal = guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))];
Vec3f src_seed_01 = (target_seed_normal + Vec3f(1.0f, 1.0f, 0)) / 2.0f;
Vec3f predicted_src_01 = src_seed_01 + (pos_01 - s) / ball_scale;
Vec3f predicted_normal = {2.0f * float(predicted_src_01[0]) - 1.0f,
2.0f * float(predicted_src_01[1]) - 1.0f, 0};
float error = projectedDistance(normal, predicted_normal);
// cout << target_seed_normal << " | " << normal << " | " << s << " | " << pos_01 << " | " << src_seed_01 << " | " << predicted_normal << " | " << error << endl;
if (!isnan(error) && abs(target_seed_normal[2] - 1) < __FLT_EPSILON__ && error < threshold) {
// float coef = 1.0f / (1 + exp(5000 * (error - threshold)));
// Vec3i predicted_src = {int(predicted_src_01[0] * pngr.width), int(predicted_src_01[1] * pngr.height), 0};
// png_byte *src_pxl = &pngr.row_pointers[predicted_src[1]][predicted_src[0] * 4]; // *4 because of the number of channels
// this->m_data[y * m_width + x] = Vec3f(float((int)src_pxl[0]) / 255.0f,
// float((int)src_pxl[1]) / 255.0f,
// float((int)src_pxl[2]) / 255.0f);
this->m_data[y * m_width + x] = Vec3f(predicted_src_01[0], predicted_src_01[1], predicted_src_01[2]);
break;
// coefs[y * m_width + x] += coef * (1 - coefs[y * m_width + x]);
}
}
// m_data[y * m_width + x] /= coefs[y * m_width + x];
}
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere in file fname
// Simple lit-sphere shading: each pixel is colored by looking up the
// sphere texture at the position given by its normal's (x, y).
void litSphere(const char *fname) {
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
Vec3f normal = m_data.at(y * m_width + x);
if (normal[2] <= __FLT_EPSILON__)
m_data[y * m_width + x] = {0, 0, 0};
else {
float abs_norm = 0.5f + normal[0] / 2.0f;
float ord_norm = 0.5f + normal[1] / 2.0f;
int abs = (int)floor(abs_norm * pngr.width);
int ord = (int)floor(ord_norm * pngr.height);
png_byte *src_pxl = &pngr.row_pointers[ord][abs * 4]; // *4 because of the number of channels
// printf("%4d, %4d = RGBA(%3d, %3d, %3d, %3d)\n", x, y, src_pxl[0], src_pxl[1], src_pxl[2], src_pxl[3]);
m_data[y * m_width + x] = {float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f};
}
}
}
}
// Box-average blur over a disc of the given radius, applied only to pixels
// whose z-channel lies in (0, 2).
// NOTE(review): (y + y_off, x + x_off) is not bounds-checked, so pixels
// within `radius` of the border index outside data_bak — confirm inputs
// guarantee a safe margin.
void linearBlur(float radius) {
// Copy the image
vector<Vec3f> data_bak = this->m_data;
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
if (abs(m_data[y * m_width + x][2] - 1) < 1) {
Vec3f value = {0, 0, 0};
float coef = 0;
for (int y_off = -ceil(radius); y_off <= ceil(radius); y_off++) {
for (int x_off = -ceil(radius); x_off <= ceil(radius); x_off++) {
if (pow(x_off, 2) + pow(y_off, 2) < pow(radius, 2)) {
value += data_bak[(y + y_off) * m_width + (x + x_off)];
coef += 1;
}
}
}
m_data[y * m_width + x] = value / coef;
}
}
}
}
// Replaces each pixel that stores source coordinates (z near 1, see
// styleBlitTreeCoords) with the color sampled at those coordinates in the
// PNG file `fname`.
// NOTE(review): the rounded coordinates are not clamped to the PNG size —
// out-of-range stored coordinates would read past row_pointers; confirm.
void transferFrom(const char *fname) {
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
if (abs(m_data[y * m_width + x][2] - 1) < 0.9) {
png_byte *src_pxl = &pngr.row_pointers[int(round(m_data[y * m_width + x][1] * pngr.height))][int(round(m_data[y * m_width + x][0] * pngr.width)) * 4]; // *4 because of the number of channels
this->m_data[y * m_width + x] = Vec3f(float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f);
}
}
}
}
};
|
reduction_teams.c | #include <stdio.h>
#include <omp.h>
#define N 1000000ll
#define SUM (N * (N-1)/2)
/* Validates the host-side copy of the reduction result against SUM and
 * prints an overall pass/fail line combining the host and device checks.
 * Increments *errors when the host value is wrong. */
void checkHost(int gpu_error, int* errors, long long a){
    int host_ok = (a == SUM);
    if (!host_ok){
        printf ("Host - Incorrect result = %lld, expected = %lld!\n", a, SUM);
        (*errors)++;
    }
    if (host_ok && !gpu_error){
        printf("-----> Success\n");
    } else{
        printf("-----> Failure\n");
    }
}
/* Runs the sum of 0..N-1 on the device with the requested launch
 * configuration and validates the result on both device and host.
 * BUG FIX: gpu_error was written inside the target region but not mapped;
 * scalars referenced in a target construct are implicitly firstprivate
 * (OpenMP 4.5+), so device-side failures never reached the host. It is now
 * included in the map(tofrom:) clause. */
void reduction(int num_teams, int num_threads, int* errors){
    long long result = 0;
    int gpu_error = 0;
    #pragma omp target teams num_teams(num_teams) thread_limit(num_threads) map(tofrom: result, gpu_error)
    {
        long long a, i;
        a = 0;
        #pragma omp parallel for reduction(+:a)
        for (i = 0; i < N; i++) {
            a += i;
        }
        result = a;
        if (a != SUM && omp_get_team_num() <= 50){ //limit teams that print
            printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
            gpu_error = 1;
        }
    } //end of target
    checkHost(gpu_error, errors, result);
}
/* Exercises the target-teams reduction across several launch
 * configurations; returns 0 when every run succeeded, -1 otherwise.
 * (Removed the unused local `gpu_error`; per-run device errors are
 * handled inside reduction/checkHost.) */
int main (void)
{
    int errors = 0;  /* total number of failed runs */
    printf("\n---------- Multiple Teams ----------\n");
    printf("\nRunning 2 Teams with 64 thread per team\n");
    reduction(2, 64, &errors);
    printf("\nRunning 2 Teams with 128 threads per team\n");
    reduction(2, 128, &errors);
    printf("\nRunning 2 Teams with 256 threads per team\n");
    reduction(2, 256, &errors);
    printf("\nRunning 256 Teams with 256 threads per team (Limited to print first 50 teams)\n");
    reduction(256, 256, &errors);
    printf("\nRunning 4096 Teams with 64 threads per team (Limited to print first 50 teams)\n");
    reduction(4096, 64, &errors);
    printf("\nRunning 4096 Teams with 256 threads per team (Limited to print first 50 teams)\n");
    reduction(4096, 256, &errors);
    if(!errors){
        printf("\nRESULT: ALL RUNS SUCCESSFUL!\n");
        return 0;
    } else{
        printf("\nRESULT: FAILURES OCCURED!\n");
        return -1;
    }
}
|
IntegratorHPMCMonoImplicit.h | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#ifndef __HPMC_MONO_IMPLICIT__H__
#define __HPMC_MONO_IMPLICIT__H__
#include "IntegratorHPMCMono.h"
#include "hoomd/Autotuner.h"
#include <random>
#include <cfloat>
#ifdef _OPENMP
#include <omp.h>
#endif
/*! \file IntegratorHPMCMonoImplicit.h
\brief Defines the template class for HPMC with implicit generated depletant solvent
\note This header cannot be compiled by nvcc
*/
#ifdef NVCC
#error This header cannot be compiled by nvcc
#endif
#include <hoomd/extern/pybind/include/pybind11/pybind11.h>
namespace hpmc
{
//! Template class for HPMC update with implicit depletants
/*!
Depletants are generated randomly on the fly according to the semi-grand canonical ensemble.
The penetrable depletants model is simulated.
\ingroup hpmc_integrators
*/
template< class Shape >
class IntegratorHPMCMonoImplicit : public IntegratorHPMCMono<Shape>
    {
    public:
        //! Construct the integrator
        IntegratorHPMCMonoImplicit(std::shared_ptr<SystemDefinition> sysdef,
                              unsigned int seed);
        //! Destructor
        virtual ~IntegratorHPMCMonoImplicit();
        //! Set the depletant density in the free volume
        // Also flags the per-type Poisson parameters for recomputation.
        void setDepletantDensity(Scalar n_R)
            {
            m_n_R = n_R;
            m_need_initialize_poisson = true;
            }
        //! Set the type of depletant particle
        void setDepletantType(unsigned int type)
            {
            m_type = type;
            }
        //! Number of depletant-reinsertions
        /*! \param n_trial Depletant reinsertions per overlapping depletant
        */
        void setNTrial(unsigned int n_trial)
            {
            m_n_trial = n_trial;
            }
        //! Return number of depletant re-insertions
        unsigned int getNTrial()
            {
            return m_n_trial;
            }
        //! Returns the depletant density
        Scalar getDepletantDensity()
            {
            return m_n_R;
            }
        //! Return the depletant type
        unsigned int getDepletantType()
            {
            return m_type;
            }
        //! Return the number of re-insertion trials
        unsigned int getNumTrials() const
            {
            return m_n_trial;
            }
        //! Reset statistics counters
        // Snapshots the current implicit counters as the new run baseline,
        // in addition to the base-class reset.
        virtual void resetStats()
            {
            IntegratorHPMCMono<Shape>::resetStats();
            ArrayHandle<hpmc_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
            m_implicit_count_run_start = h_counters.data[0];
            }
        //! Print statistics about the hpmc steps taken
        virtual void printStats()
            {
            IntegratorHPMCMono<Shape>::printStats();
            hpmc_implicit_counters_t result = getImplicitCounters(1);
            // Wall-clock time of this run in seconds (clock reports ns).
            double cur_time = double(this->m_clock.getTime()) / Scalar(1e9);
            this->m_exec_conf->msg->notice(2) << "-- Implicit depletants stats:" << "\n";
            this->m_exec_conf->msg->notice(2) << "Depletant insertions per second:          "
                << double(result.insert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Configurational bias attempts per second: "
                << double(result.reinsert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of depletants in free volume:    "
                << result.getFreeVolumeFraction() << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of overlapping depletants:       "
                << result.getOverlapFraction()<< "\n";
            }
        //! Get the current counter values
        // \param mode selects the reference point of the counts (1 is used
        // above for "since run start") — exact semantics defined by the
        // implementation elsewhere; confirm before relying on other values.
        hpmc_implicit_counters_t getImplicitCounters(unsigned int mode=0);
        /* \returns a list of provided quantities
        */
        std::vector< std::string > getProvidedLogQuantities()
            {
            // start with the integrator provided quantities
            std::vector< std::string > result = IntegratorHPMCMono<Shape>::getProvidedLogQuantities();
            // then add ours
            result.push_back("hpmc_fugacity");
            result.push_back("hpmc_ntrial");
            result.push_back("hpmc_insert_count");
            result.push_back("hpmc_reinsert_count");
            result.push_back("hpmc_free_volume_fraction");
            result.push_back("hpmc_overlap_fraction");
            result.push_back("hpmc_configurational_bias_ratio");
            return result;
            }
        //! Get the value of a logged quantity
        virtual Scalar getLogValue(const std::string& quantity, unsigned int timestep);
        //! Method to scale the box
        virtual bool attemptBoxResize(unsigned int timestep, const BoxDim& new_box);
        //! Slot to be called when number of types changes
        void slotNumTypesChange();
    protected:
        Scalar m_n_R;                                            //!< Average depletant number density in free volume
        unsigned int m_type;                                     //!< Type of depletant particle to generate
        GPUArray<hpmc_implicit_counters_t> m_implicit_count;     //!< Counter of active cell cluster moves
        hpmc_implicit_counters_t m_implicit_count_run_start;     //!< Counter of active cell cluster moves at run start
        hpmc_implicit_counters_t m_implicit_count_step_start;    //!< Counter of active cell cluster moves at run start
        std::vector<std::poisson_distribution<unsigned int> > m_poisson;   //!< Poisson distribution
        std::vector<Scalar> m_lambda;                            //!< Poisson distribution parameters per type
        Scalar m_d_dep;                                          //!< Depletant circumsphere diameter
        GPUArray<Scalar> m_d_min;                                //!< Minimum sphere from which test depletant is excluded
        GPUArray<Scalar> m_d_max;                                //!< Maximum sphere for test depletant insertion
        std::vector<hoomd::detail::Saru> m_rng_depletant;        //!< RNGs for depletant insertion
        bool m_rng_initialized;                                  //!< True if RNGs have been initialized
        unsigned int m_n_trial;                                  //!< Number of trial re-insertions per depletant
        bool m_need_initialize_poisson;                          //!< Flag to tell if we need to initialize the poisson distribution
        //! Take one timestep forward
        virtual void update(unsigned int timestep);
        //! Initalize Poisson distribution parameters
        virtual void updatePoissonParameters();
        //! Initialize the Poisson distributions
        virtual void initializePoissonDistribution();
        //! Set the nominal width appropriate for depletion interaction
        virtual void updateCellWidth();
        //! Generate a random depletant position in a sphere around a particle
        template<class RNG>
        inline void generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar d_min,
            vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants);
        /*! Generate a random depletant position in a region including the sphere around a particle,
            restricted so that it does not intersect another sphere
         */
        template<class RNG>
        inline void generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar delta_other,
            vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants,
            vec3<Scalar> pos_sphere_other);
        //! Try inserting a depletant in a configuration such that it overlaps with the particle in the old (new) configuration
        inline bool insertDepletant(vec3<Scalar>& pos_depletant, const Shape& shape_depletant, unsigned int idx,
            typename Shape::param_type *params, unsigned int *h_overlaps, unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation,
            vec3<Scalar> pos_new, quat<Scalar>& orientation_new, const typename Shape::param_type& params_new,
            unsigned int &overlap_checks, unsigned int &overlap_err_count, bool &overlap_shape, bool new_config);
    };
/*! \param sysdef System definition
    \param seed Random number generator seed
    NOTE: only 3d supported at this time
*/
template< class Shape >
IntegratorHPMCMonoImplicit< Shape >::IntegratorHPMCMonoImplicit(std::shared_ptr<SystemDefinition> sysdef,
                                                                unsigned int seed)
    : IntegratorHPMCMono<Shape>(sysdef, seed), m_n_R(0), m_type(0), m_d_dep(0.0), m_rng_initialized(false), m_n_trial(0),
      m_need_initialize_poisson(true)
    {
    this->m_exec_conf->msg->notice(5) << "Constructing IntegratorHPMCImplicit" << std::endl;
    // Single-element counter array for depletant insertion statistics.
    GPUArray<hpmc_implicit_counters_t> implicit_count(1,this->m_exec_conf);
    m_implicit_count.swap(implicit_count);
    // Per-type minimum/maximum insertion sphere diameters.
    GPUArray<Scalar> d_min(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_min.swap(d_min);
    GPUArray<Scalar> d_max(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_max.swap(d_max);
    // FLT_MAX marks per-type Poisson parameters as not yet computed.
    m_lambda.resize(this->m_pdata->getNTypes(),FLT_MAX);
    }
//! Destructor
// Nothing to release explicitly: GPUArray/vector members clean up themselves.
template< class Shape >
IntegratorHPMCMonoImplicit< Shape >::~IntegratorHPMCMonoImplicit()
    {
    }
template <class Shape>
void IntegratorHPMCMonoImplicit<Shape>::slotNumTypesChange()
    {
    // Let the base class react to the type-count change first.
    IntegratorHPMCMono<Shape>::slotNumTypesChange();
    // Resize the per-type Poisson parameters (FLT_MAX = not yet computed)
    // and the min/max insertion-sphere diameter arrays.
    m_lambda.resize(this->m_pdata->getNTypes(),FLT_MAX);
    GPUArray<Scalar> min_diam(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_min.swap(min_diam);
    GPUArray<Scalar> max_diam(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_max.swap(max_diam);
    // Per-type parameters changed, so the Poisson distributions must be rebuilt.
    m_need_initialize_poisson = true;
    }
// Recomputes, for every particle type, the mean number of depletants
// (m_lambda) to insert in the spherical sampling shell around a particle
// of that type, along with the shell's min/max diameters.
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::updatePoissonParameters()
    {
    // Depletant diameter
    quat<Scalar> o;
    Shape shape_depletant(o, this->m_params[this->m_type]);
    m_d_dep = shape_depletant.getCircumsphereDiameter();
    // access GPUArrays
    ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::overwrite);
    ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::overwrite);
    for (unsigned int i_type = 0; i_type < this->m_pdata->getNTypes(); ++i_type)
        {
        // test sphere diameter and volume
        Shape shape_i(quat<Scalar>(), this->m_params[i_type]);
        Scalar delta = shape_i.getCircumsphereDiameter()+m_d_dep;
        h_d_max.data[i_type] = delta;
        // volume of insertion sphere
        Scalar V = Scalar(M_PI/6.0)*delta*delta*delta;
        // Minimum diameter of colloid sphere in which depletant can be inserted without overlapping with other colloids
        // Scalar d = std::max(Scalar(2.0)*shape_i.getInsphereRadius()-m_d_dep,0.0);
        // The insphere-based exclusion is disabled; the full sphere is sampled.
        Scalar d = Scalar(0.0);
        h_d_min.data[i_type] = d;
        // subtract inner sphere from sampling volume
        V -= Scalar(M_PI/6.0)*d*d*d;
        // average number of depletants in volume
        m_lambda[i_type] = this->m_n_R*V;
        }
    }
/*! (Re)build the per-type std::poisson_distribution objects from m_lambda.
    Types with a non-positive mean (including the FLT_MAX "invalid" marker,
    which is positive, is kept) are skipped and keep a default distribution.
*/
template<class Shape>
void IntegratorHPMCMonoImplicit< Shape >::initializePoissonDistribution()
    {
    const unsigned int ntypes = this->m_pdata->getNTypes();
    m_poisson.resize(ntypes);

    for (unsigned int cur_type = 0; cur_type < ntypes; ++cur_type)
        {
        const Scalar mean = m_lambda[cur_type];

        // guard against invalid parameters: leave the distribution untouched
        if (mean <= Scalar(0.0))
            continue;

        m_poisson[cur_type] = std::poisson_distribution<unsigned int>(mean);
        }
    }
/*! Recompute the nominal cell width used by the cell/image lists.
    Starts from the largest colloid core diameter, widens by the depletant
    circumsphere diameter when depletants are active (m_n_R > 0), and finally
    accounts for the patch interaction cutoff if a patch is set.
*/
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::updateCellWidth()
    {
    // start from the largest colloid core diameter
    this->m_nominal_width = this->getMaxCoreDiameter();

    if (m_n_R > Scalar(0.0))
        {
        // extend the interaction range by the depletant circumsphere diameter
        Shape depletant(quat<Scalar>(), this->m_params[m_type]);
        Scalar d_dep = depletant.getCircumsphereDiameter();
        this->m_nominal_width += d_dep;
        // the image list needs the same extra reach
        this->m_extra_image_width = d_dep;
        }

    // the patch interaction may require an even wider cell
    if (this->m_patch)
        {
        Scalar max_extent = 0.0;
        for (unsigned int typ = 0; typ < this->m_pdata->getNTypes(); typ++)
            {
            max_extent = std::max(max_extent, this->m_patch->getAdditiveCutoff(typ));
            }
        this->m_nominal_width = std::max(this->m_nominal_width, this->m_patch->getRCut() + max_extent);
        }

    this->m_exec_conf->msg->notice(5) << "IntegratorHPMCMonoImplicit: updating nominal width to " << this->m_nominal_width << std::endl;
    }
/*! Perform one Monte Carlo sweep with implicit depletants.
    \param timestep Current time step of the simulation

    For each of m_nselect passes over the (shuffled) local particles, a trial
    translation or rotation is generated and checked, in order of increasing
    cost, against: hard overlaps with neighbors (AABB tree), the patch/field
    energy (Metropolis), and a Poisson-distributed cloud of randomly inserted
    depletants (optionally with m_n_trial configurational-bias re-insertions).
    Counters are accumulated, particles are wrapped back into the box, and
    (with MPI) the origin is grid-shifted before communication.
*/
template< class Shape >
void IntegratorHPMCMonoImplicit< Shape >::update(unsigned int timestep)
    {
    this->m_exec_conf->msg->notice(10) << "HPMCMonoImplicit update: " << timestep << std::endl;
    IntegratorHPMC::update(timestep);

    // update poisson distributions (lazily, only after fugacity/type changes)
    if (m_need_initialize_poisson)
        {
        updatePoissonParameters();
        initializePoissonDistribution();
        m_need_initialize_poisson = false;
        }

    if (!m_rng_initialized)
        {
        unsigned int n_omp_threads = 1;
#ifdef _OPENMP
        n_omp_threads = omp_get_max_threads();
#endif
        // initialize a set of random number generators, one per OpenMP thread
        for (unsigned int i = 0; i < n_omp_threads; ++i)
            {
            m_rng_depletant.push_back(hoomd::detail::Saru(timestep,this->m_seed+this->m_exec_conf->getRank(), i));
            }
        m_rng_initialized = true;
        }

    // get needed vars
    ArrayHandle<hpmc_counters_t> h_counters(this->m_count_total, access_location::host, access_mode::readwrite);
    hpmc_counters_t& counters = h_counters.data[0];
    ArrayHandle<hpmc_implicit_counters_t> h_implicit_counters(m_implicit_count, access_location::host, access_mode::readwrite);
    hpmc_implicit_counters_t& implicit_counters = h_implicit_counters.data[0];
    m_implicit_count_step_start = implicit_counters;

    const BoxDim& box = this->m_pdata->getBox();
    unsigned int ndim = this->m_sysdef->getNDimensions();

#ifdef ENABLE_MPI
    // compute the width of the active region
    Scalar3 npd = box.getNearestPlaneDistance();
    Scalar3 ghost_fraction = this->m_nominal_width / npd;
#endif

    // Shuffle the order of particles for this step
    this->m_update_order.resize(this->m_pdata->getN());
    this->m_update_order.shuffle(timestep);

    // update the AABB Tree
    this->buildAABBTree();
    // limit m_d entries so that particles cannot possibly wander more than one box image in one time step
    this->limitMoveDistances();
    // update the image list
    this->updateImageList();

    // combine the three seeds (user seed, timestep, MPI rank)
    std::vector<unsigned int> seed_seq(3);
    seed_seq[0] = this->m_seed;
    seed_seq[1] = timestep;
    seed_seq[2] = this->m_exec_conf->getRank();
    std::seed_seq seed(seed_seq.begin(), seed_seq.end());

    // RNG for poisson distribution
    std::mt19937 rng_poisson(seed);

    if (this->m_prof) this->m_prof->push(this->m_exec_conf, "HPMC implicit");

    // access depletant insertion sphere dimensions
    ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::read);
    ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::read);

    // loop over local particles nselect times
    for (unsigned int i_nselect = 0; i_nselect < this->m_nselect; i_nselect++)
        {
        // access particle data and system box
        ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
        ArrayHandle<Scalar4> h_orientation(this->m_pdata->getOrientationArray(), access_location::host, access_mode::readwrite);
        ArrayHandle<Scalar> h_diameter(this->m_pdata->getDiameters(), access_location::host, access_mode::read);
        ArrayHandle<Scalar> h_charge(this->m_pdata->getCharges(), access_location::host, access_mode::read);

        // access interaction matrix
        ArrayHandle<unsigned int> h_overlaps(this->m_overlaps, access_location::host, access_mode::read);

        // access move sizes
        ArrayHandle<Scalar> h_d(this->m_d, access_location::host, access_mode::read);
        ArrayHandle<Scalar> h_a(this->m_a, access_location::host, access_mode::read);

        // loop through N particles in a shuffled order
        for (unsigned int cur_particle = 0; cur_particle < this->m_pdata->getN(); cur_particle++)
            {
            unsigned int i = this->m_update_order[cur_particle];

            // read in the current position and orientation
            Scalar4 postype_i = h_postype.data[i];
            Scalar4 orientation_i = h_orientation.data[i];
            vec3<Scalar> pos_i = vec3<Scalar>(postype_i);

#ifdef ENABLE_MPI
            if (this->m_comm)
                {
                // only move particle if active
                if (!isActive(make_scalar3(postype_i.x, postype_i.y, postype_i.z), box, ghost_fraction))
                    continue;
                }
#endif

            // make a trial move for i
            hoomd::detail::Saru rng_i(i, this->m_seed + this->m_exec_conf->getRank()*this->m_nselect + i_nselect, timestep);
            int typ_i = __scalar_as_int(postype_i.w);
            Shape shape_i(quat<Scalar>(orientation_i), this->m_params[typ_i]);
            unsigned int move_type_select = rng_i.u32() & 0xffff;
            bool move_type_translate = !shape_i.hasOrientation() || (move_type_select < this->m_move_ratio);

            // keep the old configuration for reverse-move bookkeeping
            Shape shape_old(quat<Scalar>(orientation_i), this->m_params[typ_i]);
            vec3<Scalar> pos_old = pos_i;

            if (move_type_translate)
                {
                move_translate(pos_i, rng_i, h_d.data[typ_i], ndim);

#ifdef ENABLE_MPI
                if (this->m_comm)
                    {
                    // check if particle has moved into the ghost layer, and skip if it is
                    if (!isActive(vec_to_scalar3(pos_i), box, ghost_fraction))
                        continue;
                    }
#endif
                }
            else
                {
                move_rotate(shape_i.orientation, rng_i, h_a.data[typ_i], ndim);
                }

            // check for overlaps with neighboring particle's positions
            bool overlap=false;

            OverlapReal r_cut_patch = 0;
            if (this->m_patch && !this->m_patch_log)
                {
                r_cut_patch = this->m_patch->getRCut() + 0.5*this->m_patch->getAdditiveCutoff(typ_i);
                }
            // query radius covers both the hard core and the patch cutoff
            OverlapReal R_query = std::max(shape_i.getCircumsphereDiameter()/OverlapReal(2.0), r_cut_patch-this->getMinCoreDiameter()/(OverlapReal)2.0);
            detail::AABB aabb_i_local = detail::AABB(vec3<Scalar>(0,0,0),R_query);

            // patch + field interaction deltaU
            double patch_field_energy_diff = 0;

            // All image boxes (including the primary)
            const unsigned int n_images = this->m_image_list.size();
            for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                {
                vec3<Scalar> pos_i_image = pos_i + this->m_image_list[cur_image];
                detail::AABB aabb = aabb_i_local;
                aabb.translate(pos_i_image);

                // stackless search
                for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                    {
                    if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                        {
                        if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                            {
                            for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                                {
                                // read in its position and orientation
                                unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);

                                Scalar4 postype_j;
                                Scalar4 orientation_j;

                                // handle j==i situations
                                if ( j != i )
                                    {
                                    // load the position and orientation of the j particle
                                    postype_j = h_postype.data[j];
                                    orientation_j = h_orientation.data[j];
                                    }
                                else
                                    {
                                    if (cur_image == 0)
                                        {
                                        // in the first image, skip i == j
                                        continue;
                                        }
                                    else
                                        {
                                        // If this is particle i and we are in an outside image, use the translated position and orientation
                                        postype_j = make_scalar4(pos_i.x, pos_i.y, pos_i.z, postype_i.w);
                                        orientation_j = quat_to_scalar4(shape_i.orientation);
                                        }
                                    }

                                // put particles in coordinate system of particle i
                                vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_i_image;

                                unsigned int typ_j = __scalar_as_int(postype_j.w);
                                Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]);

                                counters.overlap_checks++;

                                // check circumsphere overlap (cheap rejection before exact test)
                                OverlapReal rsq = dot(r_ij,r_ij);
                                OverlapReal DaDb = shape_i.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                                bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

                                Scalar r_cut_ij = 0.0;
                                if (this->m_patch)
                                    r_cut_ij = r_cut_patch + 0.5*this->m_patch->getAdditiveCutoff(typ_j);

                                if (h_overlaps.data[this->m_overlap_idx(typ_i,typ_j)]
                                    && circumsphere_overlap
                                    && test_overlap(r_ij, shape_i, shape_j, counters.overlap_err_count))
                                    {
                                    overlap = true;
                                    break;
                                    }
                                // If there is no overlap and m_patch is not NULL, calculate energy
                                else if (this->m_patch && !this->m_patch_log && rsq <= r_cut_ij*r_cut_ij)
                                    {
                                    // new-configuration energy enters with negative sign (U_old - U_new)
                                    patch_field_energy_diff -= this->m_patch->energy(r_ij, typ_i,
                                                               quat<float>(shape_i.orientation),
                                                               h_diameter.data[i],
                                                               h_charge.data[i],
                                                               typ_j,
                                                               quat<float>(orientation_j),
                                                               h_diameter.data[j],
                                                               h_charge.data[j]
                                                               );
                                    }
                                }
                            }
                        }
                    else
                        {
                        // skip ahead
                        cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                        }

                    if (overlap)
                        break;
                    } // end loop over AABB nodes

                if (overlap)
                    break;
                } // end loop over images

            // whether the move is accepted
            bool accept = !overlap;

            // In most cases checking patch energy should be cheaper than computing
            // depletants, so do that first. Calculate old patch energy only if
            // m_patch not NULL and no overlaps. Note that we are computing U_old-U_new
            // and then exponentiating directly (rather than exp(-(U_new-U_old)))
            if (this->m_patch && !this->m_patch_log && accept)
                {
                for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                    {
                    vec3<Scalar> pos_i_image = pos_old + this->m_image_list[cur_image];
                    detail::AABB aabb = aabb_i_local;
                    aabb.translate(pos_i_image);

                    // stackless search
                    for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                        {
                        if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                            {
                            if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                                {
                                for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                                    {
                                    // read in its position and orientation
                                    unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);

                                    Scalar4 postype_j;
                                    Scalar4 orientation_j;

                                    // handle j==i situations
                                    if ( j != i )
                                        {
                                        // load the position and orientation of the j particle
                                        postype_j = h_postype.data[j];
                                        orientation_j = h_orientation.data[j];
                                        }
                                    else
                                        {
                                        if (cur_image == 0)
                                            {
                                            // in the first image, skip i == j
                                            continue;
                                            }
                                        else
                                            {
                                            // If this is particle i and we are in an outside image, use the translated position and orientation
                                            postype_j = make_scalar4(pos_old.x, pos_old.y, pos_old.z, postype_i.w);
                                            orientation_j = quat_to_scalar4(shape_old.orientation);
                                            }
                                        }

                                    // put particles in coordinate system of particle i
                                    vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_i_image;
                                    unsigned int typ_j = __scalar_as_int(postype_j.w);
                                    Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]);

                                    // old-configuration energy enters with positive sign (U_old - U_new)
                                    if (dot(r_ij,r_ij) <= r_cut_patch*r_cut_patch)
                                        patch_field_energy_diff += this->m_patch->energy(r_ij,
                                                                   typ_i,
                                                                   quat<float>(orientation_i),
                                                                   h_diameter.data[i],
                                                                   h_charge.data[i],
                                                                   typ_j,
                                                                   quat<float>(orientation_j),
                                                                   h_diameter.data[j],
                                                                   h_charge.data[j]);
                                    }
                                }
                            }
                        else
                            {
                            // skip ahead
                            cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                            }
                        } // end loop over AABB nodes
                    } // end loop over images

                // Add external energetic contribution
                if (this->m_external)
                    {
                    patch_field_energy_diff += this->m_external->energydiff(i, pos_old, shape_old, pos_i, shape_i);
                    }

                // Update acceptance based on patch, will only be reached if overlap check succeeded
                accept = rng_i.d() < slow::exp(patch_field_energy_diff);
                } // end if (m_patch)

            // Depletant check
            if (accept)
                {
                // log of acceptance probability
                Scalar lnb(0.0);
                unsigned int zero = 0;

                // The trial move is valid. Now generate random depletant particles in a sphere
                // of radius (d_max+d_depletant+move size)/2.0 around the original particle position

                // draw number from Poisson distribution
                unsigned int n = 0;
                if (m_lambda[typ_i] > Scalar(0.0))
                    {
                    n = m_poisson[typ_i](rng_poisson);
                    }

                // thread-local tallies, merged into the global counters after the parallel loop
                unsigned int n_overlap_checks = 0;
                unsigned int overlap_err_count = 0;
                unsigned int insert_count = 0;
                unsigned int reinsert_count = 0;
                unsigned int free_volume_count = 0;
                unsigned int overlap_count = 0;

                // early-exit signal shared between OpenMP threads
                volatile bool flag=false;

#pragma omp parallel for reduction(+ : lnb, n_overlap_checks, overlap_err_count, insert_count, reinsert_count, free_volume_count, overlap_count) reduction(max: zero) shared(flag) if (n>0) schedule(dynamic)
                for (unsigned int k = 0; k < n; ++k)
                    {
                    if (flag)
                        {
#ifndef _OPENMP
                        break;
#else
                        // OpenMP forbids break out of a worksharing loop; drain remaining iterations
                        continue;
#endif
                        }

                    insert_count++;

                    // generate a random depletant coordinate and orientation in the sphere around the new position
                    vec3<Scalar> pos_test;
                    quat<Scalar> orientation_test;

#ifdef _OPENMP
                    unsigned int thread_idx = omp_get_thread_num();
#else
                    unsigned int thread_idx = 0;
#endif
                    generateDepletant(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], h_d_min.data[typ_i], pos_test,
                        orientation_test, this->m_params[m_type]);
                    Shape shape_test(orientation_test, this->m_params[m_type]);
                    detail::AABB aabb_test_local = shape_test.getAABB(vec3<Scalar>(0,0,0));

                    bool overlap_depletant = false;

                    // Check if the new configuration of particle i generates an overlap
                    for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                        {
                        vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
                        detail::AABB aabb = aabb_test_local;
                        aabb.translate(pos_test_image);

                        vec3<Scalar> r_ij = pos_i - pos_test_image;

                        n_overlap_checks++;

                        // check circumsphere overlap
                        OverlapReal rsq = dot(r_ij,r_ij);
                        OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
                        bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

                        if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)]
                            && circumsphere_overlap
                            && test_overlap(r_ij, shape_test, shape_i, overlap_err_count))
                            {
                            overlap_depletant = true;
                            overlap_count++;
                            break;
                            }
                        }

                    if (overlap_depletant)
                        {
                        // check against overlap with old position
                        bool overlap_old = false;

                        // Check if the old configuration of particle i generates an overlap
                        for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                            {
                            vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
                            vec3<Scalar> r_ij = vec3<Scalar>(h_postype.data[i]) - pos_test_image;

                            n_overlap_checks++;

                            // check circumsphere overlap
                            Shape shape_i_old(quat<Scalar>(h_orientation.data[i]), this->m_params[typ_i]);
                            OverlapReal rsq = dot(r_ij,r_ij);
                            OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i_old.getCircumsphereDiameter();
                            bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

                            if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)]
                                && circumsphere_overlap
                                && test_overlap(r_ij, shape_test, shape_i_old, overlap_err_count))
                                {
                                overlap_old = true;
                                break;
                                }
                            }

                        if (!overlap_old)
                            {
                            // check the depletant against all other particles in the OLD configuration
                            // All image boxes (including the primary)
                            const unsigned int n_images = this->m_image_list.size();
                            for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
                                {
                                vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image];
                                detail::AABB aabb = aabb_test_local;
                                aabb.translate(pos_test_image);

                                // stackless search
                                for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                                    {
                                    if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                                        {
                                        if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                                            {
                                            for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                                                {
                                                // read in its position and orientation
                                                unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);

                                                // we checked ptl i first
                                                if (i == j) continue;

                                                Scalar4 postype_j;
                                                Scalar4 orientation_j;

                                                // load the old position and orientation of the j particle
                                                postype_j = h_postype.data[j];
                                                orientation_j = h_orientation.data[j];

                                                // put particles in coordinate system of particle i
                                                vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_test_image;

                                                unsigned int typ_j = __scalar_as_int(postype_j.w);
                                                Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]);

                                                n_overlap_checks++;

                                                // check circumsphere overlap
                                                OverlapReal rsq = dot(r_ij,r_ij);
                                                OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                                                bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

                                                if (h_overlaps.data[this->m_overlap_idx(m_type,typ_j)]
                                                    && circumsphere_overlap
                                                    && test_overlap(r_ij, shape_test, shape_j, overlap_err_count))
                                                    {
                                                    // depletant is ignored for any overlap in the old configuration
                                                    overlap_old = true;
                                                    break;
                                                    }
                                                }
                                            }
                                        }
                                    else
                                        {
                                        // skip ahead
                                        cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                                        }

                                    if (overlap_old)
                                        break;
                                    } // end loop over AABB nodes

                                if (overlap_old)
                                    break;
                                } // end loop over images
                            }

                        if (!overlap_old)
                            {
                            free_volume_count++;
                            }
                        else
                            {
                            // the depletant overlap doesn't count since it was already overlapping
                            // in the old configuration
                            overlap_depletant = false;
                            }
                        }

                    if (overlap_depletant && !m_n_trial)
                        {
                        // plain implicit depletants: a single blocking depletant rejects the move
                        zero = 1;
                        // break out of loop
                        flag = true;
                        }
                    else if (overlap_depletant && m_n_trial)
                        {
                        // configurational-bias re-insertion (ntrial > 0)
                        const typename Shape::param_type& params_depletant = this->m_params[m_type];

                        // Number of successful depletant insertions in new configuration
                        unsigned int n_success_new = 0;

                        // Number of allowed insertion trials (those which overlap with colloid at old position)
                        unsigned int n_overlap_shape_new = 0;

                        // diameter (around origin) in which we are guaranteed to intersect with the shape
                        Scalar delta_insphere = Scalar(2.0)*shape_i.getInsphereRadius();

                        // same for old reverse move. Because we have already sampled one successful insertion
                        // that overlaps with the colloid at the new position, we increment by one (super-detailed
                        // balance)
                        unsigned int n_success_old = 1;
                        unsigned int n_overlap_shape_old = 1;

                        Scalar4& postype_i_old = h_postype.data[i];
                        vec3<Scalar> pos_i_old(postype_i_old);
                        quat<Scalar> orientation_i_old(h_orientation.data[i]);

                        for (unsigned int l = 0; l < m_n_trial; ++l)
                            {
                            // generate a random depletant position and orientation
                            // in both the old and the new configuration of the colloid particle
                            vec3<Scalar> pos_depletant_old, pos_depletant_new;
                            quat<Scalar> orientation_depletant_old, orientation_depletant_new;

                            // try moving the overlapping depletant in the excluded volume
                            // such that it overlaps with the particle at the old position
                            generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i_old, h_d_max.data[typ_i], delta_insphere,
                                pos_depletant_new, orientation_depletant_new, params_depletant, pos_i);

                            reinsert_count++;

                            Shape shape_depletant_new(orientation_depletant_new, params_depletant);
                            const typename Shape::param_type& params_i = this->m_params[__scalar_as_int(postype_i_old.w)];

                            bool overlap_shape = false;
                            if (insertDepletant(pos_depletant_new, shape_depletant_new, i, this->m_params.data(), h_overlaps.data, typ_i,
                                h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i,
                                n_overlap_checks, overlap_err_count, overlap_shape, false))
                                {
                                n_success_new++;
                                }

                            if (overlap_shape)
                                {
                                // depletant overlaps with colloid at old position
                                n_overlap_shape_new++;
                                }

                            if (l >= 1)
                                {
                                // as above, in excluded volume sphere at new position
                                generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], delta_insphere,
                                    pos_depletant_old, orientation_depletant_old, params_depletant, pos_i_old);

                                Shape shape_depletant_old(orientation_depletant_old, params_depletant);
                                if (insertDepletant(pos_depletant_old, shape_depletant_old, i, this->m_params.data(), h_overlaps.data, typ_i,
                                    h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i,
                                    n_overlap_checks, overlap_err_count, overlap_shape, true))
                                    {
                                    n_success_old++;
                                    }

                                if (overlap_shape)
                                    {
                                    // depletant overlaps with colloid at new position
                                    n_overlap_shape_old++;
                                    }
                                reinsert_count++;
                                }

                            // NOTE(review): this adds the *cumulative global* counters into the
                            // per-step thread tallies on every re-insertion trial, which looks
                            // like double counting (insertDepletant already increments the
                            // tallies via reference) — verify intent against upstream history
                            n_overlap_checks += counters.overlap_checks;
                            overlap_err_count += counters.overlap_err_count;
                            } // end loop over re-insertion attempts

                        if (n_success_new != 0)
                            {
                            lnb += log((Scalar)n_success_new/(Scalar)n_overlap_shape_new);
                            lnb -= log((Scalar)n_success_old/(Scalar)n_overlap_shape_old);
                            }
                        else
                            {
                            zero = 1;
                            // break out of loop
                            flag = true;
                            }
                        } // end if depletant overlap
                    } // end loop over depletants

                // increment counters
                counters.overlap_checks += n_overlap_checks;
                counters.overlap_err_count += overlap_err_count;
                implicit_counters.insert_count += insert_count;
                implicit_counters.free_volume_count += free_volume_count;
                implicit_counters.overlap_count += overlap_count;
                implicit_counters.reinsert_count += reinsert_count;

                // apply acceptance criterion
                if (!zero)
                    {
                    accept = rng_i.f() < exp(lnb);
                    }
                else
                    {
                    accept = false;
                    }
                } // end depletant placement

            // if the move is accepted
            if (accept)
                {
                // increment accept counter and assign new position
                if (!shape_i.ignoreStatistics())
                    {
                    if (move_type_translate)
                        counters.translate_accept_count++;
                    else
                        counters.rotate_accept_count++;
                    }

                // update the position of the particle in the tree for future updates
                detail::AABB aabb = aabb_i_local;
                aabb.translate(pos_i);
                this->m_aabb_tree.update(i, aabb);

                // update position of particle
                h_postype.data[i] = make_scalar4(pos_i.x,pos_i.y,pos_i.z,postype_i.w);

                if (shape_i.hasOrientation())
                    {
                    h_orientation.data[i] = quat_to_scalar4(shape_i.orientation);
                    }
                }
            else
                {
                if (!shape_i.ignoreStatistics())
                    {
                    // increment reject counter
                    if (move_type_translate)
                        counters.translate_reject_count++;
                    else
                        counters.rotate_reject_count++;
                    }
                }
            } // end loop over all particles
        } // end loop over nselect

        {
        ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
        ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite);

        // wrap particles back into box
        for (unsigned int i = 0; i < this->m_pdata->getN(); i++)
            {
            box.wrap(h_postype.data[i], h_image.data[i]);
            }
        }

    // perform the grid shift
#ifdef ENABLE_MPI
    if (this->m_comm)
        {
        ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite);
        ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite);

        // precalculate the grid shift
        hoomd::detail::Saru rng(timestep, this->m_seed, 0xf4a3210e);
        Scalar3 shift = make_scalar3(0,0,0);
        shift.x = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
        shift.y = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
        if (this->m_sysdef->getNDimensions() == 3)
            {
            shift.z = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0));
            }
        for (unsigned int i = 0; i < this->m_pdata->getN(); i++)
            {
            // read in the current position and orientation
            Scalar4 postype_i = h_postype.data[i];
            vec3<Scalar> r_i = vec3<Scalar>(postype_i); // translation from local to global coordinates
            r_i += vec3<Scalar>(shift);
            h_postype.data[i] = vec_to_scalar4(r_i, postype_i.w);
            box.wrap(h_postype.data[i], h_image.data[i]);
            }
        this->m_pdata->translateOrigin(shift);
        }
#endif

    if (this->m_prof) this->m_prof->pop(this->m_exec_conf);

    // migrate and exchange particles
    this->communicate(true);

    // all particle have been moved, the aabb tree is now invalid
    this->m_aabb_tree_invalid = true;
    }
/* \param rng The random number generator
* \param pos_sphere Center of sphere
* \param delta diameter of sphere
* \param d_min Diameter of smaller sphere excluding depletant
* \param pos Position of depletant (return value)
* \param orientation Orientation of depletant (return value)
* \param params_depletant Depletant parameters
*/
/* Generate a depletant position uniformly in a spherical shell around a colloid.
 * The RNG draw order (azimuth, z, radial cube, orientation) is part of the
 * reproducibility contract and must not change.
 */
template<class Shape>
template<class RNG>
inline void IntegratorHPMCMonoImplicit<Shape>::generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta,
    Scalar d_min, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletant)
    {
    // uniform point on the unit sphere: azimuth first, then z = cos(polar angle)
    Scalar theta = rng.template s<Scalar>(Scalar(0.0),Scalar(2.0*M_PI));
    Scalar z = rng.template s<Scalar>(Scalar(-1.0),Scalar(1.0));
    Scalar rho = fast::sqrt(Scalar(1.0)-z*z);
    vec3<Scalar> dir(rho*fast::cos(theta), rho*fast::sin(theta), z);

    // radial coordinate: uniform in volume of the shell [d_min/2, delta/2]
    Scalar u = rng.template s<Scalar>(fast::pow(d_min/delta,Scalar(3.0)),Scalar(1.0));
    Scalar radius = Scalar(0.5)*delta*fast::pow(u,Scalar(1.0/3.0));

    pos = pos_sphere + radius*dir;

    // anisotropic depletants additionally get a random orientation
    Shape shape_depletant(quat<Scalar>(), params_depletant);
    if (shape_depletant.hasOrientation())
        {
        orientation = generateRandomOrientation(rng);
        }
    }
/* \param rng The random number generator
* \param pos_sphere Center of sphere
* \param delta diameter of sphere
* \param delta_other diameter of other sphere
* \param pos Position of depletant (return value)
* \param orientation Orientation of depletant (return value)
* \param params_depletant Depletant parameters
* \param pos_sphere_other Center of other sphere
*/
/* Sample a depletant in the excluded-volume sphere of one colloid configuration,
 * restricted (when the two spheres intersect) to the region that can overlap the
 * other configuration. The exact RNG draw order and branch structure determine
 * the sampled distribution — do not reorder.
 */
template<class Shape>
template<class RNG>
inline void IntegratorHPMCMonoImplicit<Shape>::generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta,
    Scalar delta_other, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletant,
    vec3<Scalar> pos_sphere_other)
    {
    // separation between the two sphere centers
    vec3<Scalar> r_ij = pos_sphere - pos_sphere_other;
    Scalar d = fast::sqrt(dot(r_ij,r_ij));

    // default: sample the full sphere of diameter delta around pos_sphere
    Scalar rmin(0.0);
    Scalar rmax = Scalar(0.5)*delta;

    Scalar ctheta_min(-1.0);
    bool do_rotate = false;
    if (d > Scalar(0.0) && delta_other > Scalar(0.0))
        {
        // draw a random direction in the bounded spherical shell
        Scalar ctheta = (delta_other*delta_other+Scalar(4.0)*d*d-delta*delta)/(Scalar(4.0)*delta_other*d);
        if (ctheta >= Scalar(-1.0) && ctheta < Scalar(1.0))
            {
            // true intersection, we can restrict angular sampling
            ctheta_min = ctheta;
            }

        // is there an intersection?
        if (Scalar(2.0)*d < delta+delta_other)
            {
            // sample in shell around smaller sphere
            rmin = delta_other/Scalar(2.0);
            rmax = d+delta/Scalar(2.0);
            do_rotate = true;
            }
        }

    // draw random radial coordinate in a spherical shell (uniform in volume)
    Scalar r3 = rng.template s<Scalar>(fast::pow(rmin/rmax,Scalar(3.0)),Scalar(1.0));
    Scalar r = rmax*fast::pow(r3,Scalar(1.0/3.0));

    // random direction in spherical shell
    Scalar z = rng.s(ctheta_min,Scalar(1.0));
    Scalar phi = Scalar(2.0*M_PI)*rng.template s<Scalar>();
    vec3<Scalar> n;
    if (do_rotate)
        {
        // align the polar sampling axis with the center-to-center direction
        vec3<Scalar> u(r_ij/d);
        // normal vector
        vec3<Scalar> v(cross(u,vec3<Scalar>(0,0,1)));
        if (dot(v,v) < EPSILON)
            {
            // u was (anti)parallel to z; fall back to a different reference axis
            v = cross(u,vec3<Scalar>(0,1,0));
            }
        v *= fast::rsqrt(dot(v,v));

        quat<Scalar> q(quat<Scalar>::fromAxisAngle(u,phi));
        n = z*u+(fast::sqrt(Scalar(1.0)-z*z))*rotate(q,v);
        }
    else
        {
        n = vec3<Scalar>(fast::sqrt(Scalar(1.0)-z*z)*fast::cos(phi),fast::sqrt(Scalar(1.0)-z*z)*fast::sin(phi),z);
        }

    // test depletant position
    pos = r*n;
    if (do_rotate)
        {
        // insert such that it potentially intersects the sphere, but not the other one
        pos += pos_sphere_other;
        }
    else
        {
        // insert in sphere
        pos += pos_sphere;
        }

    // anisotropic depletants additionally get a random orientation
    Shape shape_depletant(quat<Scalar>(), params_depletant);
    if (shape_depletant.hasOrientation())
        {
        orientation = generateRandomOrientation(rng);
        }
    }
/*! \param pos_depletant Depletant position
* \param shape_depletant Depletant shape
* \param idx Index of updated particle
* \param h_overlaps Interaction matrix
* \param typ_i type of updated particle
* \param h_orientation Orientation array
* \param pos_new New position of updated particle
* \param orientation_new New orientation of updated particle
* \param params_new New shape parameters of updated particle
* \param counters HPMC overlap counters
*/
/* Attempt to insert one depletant and classify the insertion.
 * Checks, in order: (1) overlap with the moved particle in the configuration
 * selected by the `new_config` flag — recorded in `overlap_shape`; (2) overlap
 * with the moved particle in the *other* configuration (a pre-existing overlap
 * disqualifies the insertion); (3) overlap with any other particle over all
 * box images. Returns true only for insertions that overlap the selected
 * configuration of particle idx and nothing else.
 * The two back-to-back shape setups are intentionally mirrored
 * (new_config vs !new_config) — keep the statement order intact.
 */
template<class Shape>
inline bool IntegratorHPMCMonoImplicit<Shape>::insertDepletant(vec3<Scalar>& pos_depletant,
    const Shape& shape_depletant, unsigned int idx, typename Shape::param_type *params, unsigned int *h_overlaps,
    unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation, vec3<Scalar> pos_new, quat<Scalar>& orientation_new,
    const typename Shape::param_type& params_new, unsigned int &n_overlap_checks,
    unsigned int &overlap_err_count, bool& overlap_shape, bool new_config)
    {
    overlap_shape=false;

    detail::AABB aabb_depletant_local = shape_depletant.getAABB(vec3<Scalar>(0,0,0));

    // now check if depletant overlaps with moved particle in the old configuration
    Shape shape_i(quat<Scalar>(), params_new);
    if (shape_i.hasOrientation())
        {
        if (! new_config)
            {
            // load old orientation
            Scalar4 orientation_i = h_orientation[idx];
            shape_i.orientation = quat<Scalar>(orientation_i);
            }
        else
            {
            shape_i.orientation = orientation_new;
            }
        }

    vec3<Scalar> pos_i;
    if (!new_config)
        {
        // load old position
        pos_i = vec3<Scalar>(h_postype[idx]);
        }
    else
        {
        pos_i = pos_new;
        }

    // only need to consider the (0,0,0) image
    detail::AABB aabb = aabb_depletant_local;
    aabb.translate(pos_depletant);

    // put particles in coordinate system of depletant
    vec3<Scalar> r_ij = pos_i - pos_depletant;

    n_overlap_checks++;

    // test circumsphere overlap
    OverlapReal rsq = dot(r_ij,r_ij);
    OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
    bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

    if (h_overlaps[this->m_overlap_idx(typ_i, m_type)]
        && circumsphere_overlap && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
        {
        overlap_shape = true;
        }

    // same, but for reverse move: load the OTHER configuration of particle idx
    if (shape_i.hasOrientation())
        {
        if (new_config)
            {
            // load old orientation
            Scalar4 orientation_i = h_orientation[idx];
            shape_i.orientation = quat<Scalar>(orientation_i);
            }
        else
            {
            shape_i.orientation = orientation_new;
            }
        }

    if (new_config)
        {
        // load old position
        pos_i = vec3<Scalar>(h_postype[idx]);
        }
    else
        {
        pos_i = pos_new;
        }

    // only need to consider the (0,0,0) image
    aabb = aabb_depletant_local;
    aabb.translate(pos_depletant);

    // put particles in coordinate system of depletant
    r_ij = pos_i - pos_depletant;

    n_overlap_checks++;

    // test circumsphere overlap
    rsq = dot(r_ij,r_ij);
    DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
    circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

    // check for overlaps with neighboring particle's positions
    bool overlap=false;
    if (h_overlaps[this->m_overlap_idx(m_type, typ_i)]
        && circumsphere_overlap && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
        {
        // if we are already overlapping in the other configuration, this doesn't count as an insertion
        overlap = true;
        }

    if (!overlap && overlap_shape)
        {
        // check the depletant against every other particle, over all images
        // All image boxes (including the primary)
        const unsigned int n_images = this->m_image_list.size();
        for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
            {
            vec3<Scalar> pos_depletant_image = pos_depletant + this->m_image_list[cur_image];
            detail::AABB aabb = aabb_depletant_local;
            aabb.translate(pos_depletant_image);

            // stackless search
            for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                {
                if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                    {
                    if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                        {
                        for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                            {
                            // read in its position and orientation
                            unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);

                            // load the position and orientation of the j particle
                            Scalar4 postype_j = h_postype[j];
                            vec3<Scalar> pos_j(postype_j);
                            Scalar4 orientation_j = h_orientation[j];
                            unsigned int type = __scalar_as_int(postype_j.w);
                            Shape shape_j(quat<Scalar>(orientation_j), params[type]);

                            if (j == idx)
                                {
                                // we have already excluded overlap with the moved particle above
                                continue;
                                }

                            // put particles in coordinate system of depletant
                            vec3<Scalar> r_ij = pos_j - pos_depletant_image;

                            n_overlap_checks++;

                            // check circumsphere overlap
                            OverlapReal rsq = dot(r_ij,r_ij);
                            OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                            bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

                            if (h_overlaps[this->m_overlap_idx(type, m_type)]
                                && circumsphere_overlap
                                && test_overlap(r_ij, shape_depletant, shape_j, overlap_err_count))
                                {
                                overlap = true;
                                break;
                                }
                            }
                        }
                    }
                else
                    {
                    // skip ahead
                    cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                    }

                if (overlap)
                    break;
                } // end loop over AABB nodes

            if (overlap)
                break;
            } // end loop over images
        } // end if overlap with shape

    return overlap_shape && !overlap;
    }
/*! \param quantity Name of the log quantity to get
    \param timestep Current time step of the simulation
    \return the requested log quantity.
*/
template<class Shape>
Scalar IntegratorHPMCMonoImplicit<Shape>::getLogValue(const std::string& quantity, unsigned int timestep)
    {
    // quantities that only read stored parameters
    if (quantity == "hpmc_fugacity")
        {
        return (Scalar) m_n_R;
        }
    if (quantity == "hpmc_ntrial")
        {
        return (Scalar) m_n_trial;
        }

    // every remaining quantity is derived from the counters, taken
    // relative to the last executed step (mode 2)
    hpmc_counters_t base_counters = IntegratorHPMC::getCounters(2);
    hpmc_implicit_counters_t depletant_counters = getImplicitCounters(2);

    if (quantity == "hpmc_insert_count")
        {
        // number of depletant insertions per colloid (0 before any moves)
        if (base_counters.getNMoves() > 0)
            return (Scalar)depletant_counters.insert_count/(Scalar)base_counters.getNMoves();
        else
            return Scalar(0.0);
        }
    if (quantity == "hpmc_reinsert_count")
        {
        // number of overlapping depletants reinserted per colloid (0 before any moves)
        if (base_counters.getNMoves() > 0)
            return (Scalar)depletant_counters.reinsert_count/(Scalar)base_counters.getNMoves();
        else
            return Scalar(0.0);
        }
    if (quantity == "hpmc_free_volume_fraction")
        {
        // fraction of free volume in the depletant insertion sphere
        return (Scalar) depletant_counters.getFreeVolumeFraction();
        }
    if (quantity == "hpmc_overlap_fraction")
        {
        // fraction of overlapping depletants after the trial move
        return (Scalar) depletant_counters.getOverlapFraction();
        }
    if (quantity == "hpmc_configurational_bias_ratio")
        {
        // configurational bias ratio reported by the implicit-depletant counters
        return (Scalar) depletant_counters.getConfigurationalBiasRatio();
        }

    // not a quantity handled here -> defer to the base class
    return IntegratorHPMCMono<Shape>::getLogValue(quantity, timestep);
    }
/*! \param mode 0 -> Absolute count, 1 -> relative to the start of the run, 2 -> relative to the last executed step
    \return The current state of the acceptance counters
    IntegratorHPMCMonoImplicit maintains a count of the number of accepted and rejected moves since instantiation. getCounters()
    provides the current value. The parameter *mode* controls whether the returned counts are absolute, relative
    to the start of the run, or relative to the start of the last executed step.
*/
template<class Shape>
hpmc_implicit_counters_t IntegratorHPMCMonoImplicit<Shape>::getImplicitCounters(unsigned int mode)
    {
    ArrayHandle<hpmc_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);

    // pick the reference point the returned counts are relative to
    hpmc_implicit_counters_t result;
    switch (mode)
        {
        case 0:
            result = h_counters.data[0];
            break;
        case 1:
            result = h_counters.data[0] - m_implicit_count_run_start;
            break;
        default:
            result = h_counters.data[0] - m_implicit_count_step_start;
            break;
        }

#ifdef ENABLE_MPI
    if (this->m_comm)
        {
        // MPI Reduction to total result values on all ranks
        MPI_Allreduce(MPI_IN_PLACE, &result.insert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.free_volume_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.overlap_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.reinsert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        }
#endif

    return result;
    }
/*! \param timestep Current timestep (unused; the resize is never performed)
    \param new_box Proposed new box dimensions (unused)
    NPT simulations are not supported with implicit depletants
    (The Nmu_ptPT ensemble is unstable)
    \returns false if resize results in overlaps (in practice this override always throws)
*/
template<class Shape>
bool IntegratorHPMCMonoImplicit<Shape>::attemptBoxResize(unsigned int timestep, const BoxDim& new_box)
    {
    // Box resize moves are unconditionally rejected: log the error and abort the run.
    this->m_exec_conf->msg->error() << "Nmu_pPT simulations are unsupported." << std::endl;
    throw std::runtime_error("Error during implicit depletant integration\n");
    }
//! Export this hpmc integrator to python
/*! \param m Python module to export the class into
    \param name Name of the class in the exported python module
    \tparam Shape An instantiation of IntegratorHPMCMono<Shape> will be exported
*/
template < class Shape > void export_IntegratorHPMCMonoImplicit(pybind11::module& m, const std::string& name)
    {
    // shorthand for the class being exported
    typedef IntegratorHPMCMonoImplicit<Shape> Integrator_t;
    pybind11::class_<Integrator_t, std::shared_ptr< Integrator_t > > cls(m, name.c_str(), pybind11::base< IntegratorHPMCMono<Shape> >());
    cls.def(pybind11::init< std::shared_ptr<SystemDefinition>, unsigned int >());
    cls.def("setDepletantDensity", &Integrator_t::setDepletantDensity);
    cls.def("setDepletantType", &Integrator_t::setDepletantType);
    cls.def("setNTrial", &Integrator_t::setNTrial);
    cls.def("getNTrial", &Integrator_t::getNTrial);
    cls.def("getImplicitCounters", &Integrator_t::getImplicitCounters);
    }
//! Export the counters for depletants
/*! \param m Python module to export the counter struct into */
inline void export_hpmc_implicit_counters(pybind11::module& m)
    {
    pybind11::class_< hpmc_implicit_counters_t > counters(m, "hpmc_implicit_counters_t");
    // raw event counts
    counters.def_readwrite("insert_count", &hpmc_implicit_counters_t::insert_count);
    counters.def_readwrite("reinsert_count", &hpmc_implicit_counters_t::reinsert_count);
    counters.def_readwrite("free_volume_count", &hpmc_implicit_counters_t::free_volume_count);
    counters.def_readwrite("overlap_count", &hpmc_implicit_counters_t::overlap_count);
    // derived statistics
    counters.def("getFreeVolumeFraction", &hpmc_implicit_counters_t::getFreeVolumeFraction);
    counters.def("getOverlapFraction", &hpmc_implicit_counters_t::getOverlapFraction);
    counters.def("getConfigurationalBiasRatio", &hpmc_implicit_counters_t::getConfigurationalBiasRatio);
    }
} // end namespace hpmc
#endif // __HPMC_MONO_IMPLICIT__H__
|
GB_subassign_02.c | //------------------------------------------------------------------------------
// GB_subassign_02: C(I,J) = A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 02: C(I,J) = A ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: NULL
// A: matrix
// S: constructed
#define GB_FREE_WORK GB_FREE_TWO_SLICE
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_02
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix A,
const GrB_Matrix S,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
// NOTE(review): GB_GET_C / GB_GET_A / GB_GET_S are assumed to unpack the
// matrix fields used below (Ap, Ai, Ax, asize, Sp, Si, nzombies, ...) --
// confirm in GB_subassign_methods.h.
GB_GET_C ;
GB_GET_A ;
GB_GET_S ;
// Method 02 has no accumulator: C(I,J) is overwritten with A directly
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 02: C(I,J) = A ; using S
//--------------------------------------------------------------------------
// Time: Optimal. All entries in A+S must be examined, so the work is
// Omega (nnz(A)+nnz(S)).
// Method 02 and Method 04 are somewhat similar. They differ on how C is
// modified when the entry is present in S but not A.
//--------------------------------------------------------------------------
// Parallel: Z=A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
// Slice the work on the implicit sum Z = A+S into tasks; presumably this
// defines ntasks, nthreads, Zh, Z_to_X, Z_to_S and the per-task bounds
// consumed by the task-descriptor macros below -- verify in the header.
GB_SUBASSIGN_TWO_SLICE (A, S) ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
// Each task walks a disjoint slice of Z; zombie counts are combined with
// the OpenMP reduction, pending tuples are only counted here (inserted in
// phase 2).
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and S(:,j)
//------------------------------------------------------------------
// Zh == NULL means Z is not hypersparse, so vector k is column k
int64_t j = (Zh == NULL) ? k : Zh [k] ;
// set pA..pA_end to A(:,j) and pS..pS_end to S(:,j)
GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;
//------------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//------------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = Si [pS] ;
int64_t iA = Ai [pA] ;
if (iS < iA)
{
// ----[C . 1] or [X . 1]-----------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still a zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
// count only; the pending tuple is built in phase 2
task_pending++ ;
GB_NEXT (A) ;
}
else
{
// ----[C A 1] or [X A 1]-----------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =A ): copy A into C, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_noaccum_C_A_1_matrix ;
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list S (:,j) has entries. List A (:,j) exhausted.
while (pS < pS_end)
{
// ----[C . 1] or [X . 1]---------------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( delete ): becomes zombie
// [X . 1]: action: ( X ): still a zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
// List A (:,j) has entries. List S (:,j) exhausted.
// every remaining A entry has no S counterpart: all become pending
task_pending += (pA_end - pA) ;
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
// cumulative sum of per-task pending counts gives each task its slot range
GB_PENDING_CUMSUM ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
// same merge as phase 1, but now only the [. A 1] case acts: it
// inserts the pending tuple reserved for it in phase 1
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and S(:,j)
//------------------------------------------------------------------
int64_t j = (Zh == NULL) ? k : Zh [k] ;
GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;
//------------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//------------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = Si [pS] ;
int64_t iA = Ai [pA] ;
if (iS < iA)
{
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
else
{
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// ----[. A 1]--------------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iA = Ai [pA] ;
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
// NOTE(review): GB_SUBASSIGN_WRAPUP is assumed to contain the function's
// return statement (there is no explicit return here) -- confirm.
GB_SUBASSIGN_WRAPUP ;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.