source | c |
|---|---|
Trainer.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _TRAINER_
#define _TRAINER_
#include <float.h>
#include <limits.h>
#include <stdio.h>
#include <vector>
#include "defines.h"
// Contains times / losses / etc
struct TrainStatistics {
std::vector<double> times;
std::vector<double> losses;
};
typedef struct TrainStatistics TrainStatistics;
class Trainer {
protected:
void TrackTimeLoss(double cur_time, double cur_loss, TrainStatistics *stats) {
stats->times.push_back(cur_time);
stats->losses.push_back(cur_loss);
}
void PrintPartitionTime(Timer &timer) { printf("Partition Time(s): %f\n", timer.Elapsed()); }
void PrintTimeLoss(double cur_time, double cur_loss, int epoch) {
printf("Epoch: %d\tTime(s): %f\tLoss: %lf\t\n", epoch, cur_time, cur_loss);
}
void EpochBegin(int epoch, Timer &gradient_timer, Model *model, const std::vector<Datapoint *> &datapoints,
TrainStatistics *stats) {
double cur_time = gradient_timer.Elapsed();
double cur_loss = model->ComputeLoss(datapoints);
TrackTimeLoss(cur_time, cur_loss, stats);
if (FLAGS_print_loss_per_epoch && epoch % FLAGS_interval_print == 0) {
PrintTimeLoss(cur_time, cur_loss, epoch);
}
}
public:
Trainer() {
/*
// Some error checking.
if (FLAGS_n_threads > std::thread::hardware_concurrency()) {
std::cerr << "Trainer: Number of threads is greater than the number of physical cores." << std::endl;
// exit(0);
}
// Basic set up, like pinning to core, setting number of threads.
omp_set_num_threads(FLAGS_n_threads);
#pragma omp parallel
{ pin_to_core(omp_get_thread_num()); }
*/
}
virtual ~Trainer() {}
// Main training method.
virtual TrainStatistics Train(Model *model, const std::vector<Datapoint *> &datapoints, Updater *updater) = 0;
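// Concrete trainers (not shown in this header) implement Train(); presumably they
// call EpochBegin() once per epoch to record, and optionally print, time and loss.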
};
#endif
|
schedule-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n=16, chunk, a[n], suma=0;
if(argc<2){
fprintf(stderr,"\nMissing chunk argument\n");
exit(-1);
}
chunk = atoi(argv[1]);
//n = atoi(argv[2]);
for(i=0;i<n;i++) a[i]=i;
#pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(static,chunk)
for(i=0;i<n;i++){
suma = suma + a[i];
printf("thread %d suma a[%d] suma=%d \n",omp_get_thread_num(),i,suma);
}
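/* Note: suma is firstprivate (each thread starts from the sequential value 0) and
   lastprivate (the copy of the thread that runs the sequentially last iteration,
   i = n-1, is written back). The value printed below is therefore only that
   thread's partial sum, not 0+1+...+15 = 120. For example, with chunk=4 and at
   least 4 threads, the thread owning i=12..15 finishes with suma = 54. */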
printf("Fuera de 'parallel for' suma=%d\n",suma);
}
|
GB_unaryop__lnot_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_bool
// op(A') function: GB_tran__lnot_uint16_bool
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
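// For illustration (after macro substitution), GB_CAST_OP (p, p) is roughly:
//   bool aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = !(z != 0) ;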
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint16_bool
(
uint16_t *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint16_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__land_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int64)
// A*D function (colscale): GB (_AxD__land_int64)
// D*A function (rowscale): GB (_DxB__land_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int64)
// C=scalar+B GB (_bind1st__land_int64)
// C=scalar+B' GB (_bind1st_tran__land_int64)
// C=A+scalar GB (_bind2nd__land_int64)
// C=A'+scalar GB (_bind2nd_tran__land_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
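// For illustration, GB_BINOP (GB_CX (p), aij, bij, i, j) expands to
//   Cx [p] = ((aij != 0) && (bij != 0)) ;
// with aij and bij loaded by GB_GETA / GB_GETB above.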
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__land_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__land_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
box_coder_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 };
inline BoxCodeType GetBoxCodeType(const std::string& type) {
if (type == "encode_center_size") {
return BoxCodeType::kEncodeCenterSize;
} else if (type == "decode_center_size") {
return BoxCodeType::kDecodeCenterSize;
}
PADDLE_THROW("Not support type %s.", type);
}
template <typename DeviceContext, typename T>
class BoxCoderKernel : public framework::OpKernel<T> {
public:
void EncodeCenterSize(const framework::Tensor* target_box,
const framework::Tensor* prior_box,
const framework::Tensor* prior_box_var,
const bool normalized, T* output) const {
int64_t row = target_box->dims()[0];
int64_t col = prior_box->dims()[0];
int64_t len = prior_box->dims()[1];
auto* target_box_data = target_box->data<T>();
auto* prior_box_data = prior_box->data<T>();
const T* prior_box_var_data = nullptr;
if (prior_box_var) prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
for (int64_t i = 0; i < row; ++i) {
for (int64_t j = 0; j < col; ++j) {
T prior_box_width = prior_box_data[j * len + 2] -
prior_box_data[j * len] + (normalized == false);
T prior_box_height = prior_box_data[j * len + 3] -
prior_box_data[j * len + 1] +
(normalized == false);
T prior_box_center_x =
(prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2;
T prior_box_center_y =
(prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2;
T target_box_center_x =
(target_box_data[i * len + 2] + target_box_data[i * len]) / 2;
T target_box_center_y =
(target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2;
T target_box_width = target_box_data[i * len + 2] -
target_box_data[i * len] + (normalized == false);
T target_box_height = target_box_data[i * len + 3] -
target_box_data[i * len + 1] +
(normalized == false);
size_t offset = i * col * len + j * len;
output[offset] =
(target_box_center_x - prior_box_center_x) / prior_box_width;
output[offset + 1] =
(target_box_center_y - prior_box_center_y) / prior_box_height;
output[offset + 2] =
std::log(std::fabs(target_box_width / prior_box_width));
output[offset + 3] =
std::log(std::fabs(target_box_height / prior_box_height));
if (prior_box_var) {
output[offset] /= prior_box_var_data[j * len];
output[offset + 1] /= prior_box_var_data[j * len + 1];
output[offset + 2] /= prior_box_var_data[j * len + 2];
output[offset + 3] /= prior_box_var_data[j * len + 3];
}
}
}
}
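// The loop above is the usual center-size box encoding (as in SSD / Faster R-CNN
// style detectors): t_x = (cx_gt - cx_prior) / w_prior, t_y = (cy_gt - cy_prior) / h_prior,
// t_w = log(w_gt / w_prior), t_h = log(h_gt / h_prior), each optionally divided by the
// corresponding prior-box variance. DecodeCenterSize below inverts this mapping.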
void DecodeCenterSize(const framework::Tensor* target_box,
const framework::Tensor* prior_box,
const framework::Tensor* prior_box_var,
const bool normalized, T* output) const {
int64_t row = target_box->dims()[0];
int64_t col = prior_box->dims()[0];
int64_t len = prior_box->dims()[1];
auto* target_box_data = target_box->data<T>();
auto* prior_box_data = prior_box->data<T>();
const T* prior_box_var_data = nullptr;
if (prior_box_var) prior_box_var_data = prior_box_var->data<T>();
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
for (int64_t i = 0; i < row; ++i) {
for (int64_t j = 0; j < col; ++j) {
size_t offset = i * col * len + j * len;
T prior_box_width = prior_box_data[j * len + 2] -
prior_box_data[j * len] + (normalized == false);
T prior_box_height = prior_box_data[j * len + 3] -
prior_box_data[j * len + 1] +
(normalized == false);
T prior_box_center_x =
(prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2;
T prior_box_center_y =
(prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2;
T target_box_center_x = 0, target_box_center_y = 0;
T target_box_width = 0, target_box_height = 0;
if (prior_box_var) {
target_box_center_x = prior_box_var_data[j * len] *
target_box_data[offset] * prior_box_width +
prior_box_center_x;
target_box_center_y = prior_box_var_data[j * len + 1] *
target_box_data[offset + 1] *
prior_box_height +
prior_box_center_y;
target_box_width = std::exp(prior_box_var_data[j * len + 2] *
target_box_data[offset + 2]) *
prior_box_width;
target_box_height = std::exp(prior_box_var_data[j * len + 3] *
target_box_data[offset + 3]) *
prior_box_height;
} else {
target_box_center_x =
target_box_data[offset] * prior_box_width + prior_box_center_x;
target_box_center_y = target_box_data[offset + 1] * prior_box_height +
prior_box_center_y;
target_box_width =
std::exp(target_box_data[offset + 2]) * prior_box_width;
target_box_height =
std::exp(target_box_data[offset + 3]) * prior_box_height;
}
output[offset] = target_box_center_x - target_box_width / 2;
output[offset + 1] = target_box_center_y - target_box_height / 2;
output[offset + 2] =
target_box_center_x + target_box_width / 2 - (normalized == false);
output[offset + 3] =
target_box_center_y + target_box_height / 2 - (normalized == false);
}
}
}
void Compute(const framework::ExecutionContext& context) const override {
auto* prior_box = context.Input<framework::Tensor>("PriorBox");
auto* prior_box_var = context.Input<framework::Tensor>("PriorBoxVar");
auto* target_box = context.Input<framework::LoDTensor>("TargetBox");
auto* output_box = context.Output<framework::Tensor>("OutputBox");
if (target_box->lod().size()) {
PADDLE_ENFORCE_EQ(target_box->lod().size(), 1UL,
"Only support 1 level of LoD.");
}
auto row = target_box->dims()[0];
auto col = prior_box->dims()[0];
auto len = prior_box->dims()[1];
output_box->mutable_data<T>({row, col, len}, context.GetPlace());
auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type"));
bool normalized = context.Attr<bool>("box_normalized");
T* output = output_box->data<T>();
if (code_type == BoxCodeType::kEncodeCenterSize) {
EncodeCenterSize(target_box, prior_box, prior_box_var, normalized,
output);
} else if (code_type == BoxCodeType::kDecodeCenterSize) {
DecodeCenterSize(target_box, prior_box, prior_box_var, normalized,
output);
}
}
};
} // namespace operators
} // namespace paddle
|
miniGMG.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#ifdef OMP
#include <omp.h>
#endif
#ifdef __MPI
#include <mpi.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "defines.h"
#include "box.h"
#include "mg.h"
#include "operators.h"
//------------------------------------------------------------------------------------------------------------------------------
void MGResetTimers(domain_type * domain);
int main(int argc, char **argv){
int MPI_Rank=0;
int MPI_Tasks=1;
int OMP_Threads = 1;
#ifdef OMP
#pragma omp parallel
{
#pragma omp master
{
OMP_Threads = omp_get_num_threads();
}
}
#endif
#ifdef __MPI
#warning Compiling for MPI...
int MPI_threadingModel = -1;
//int MPI_threadingModelRequested = MPI_THREAD_SINGLE;
//int MPI_threadingModelRequested = MPI_THREAD_SERIALIZED;
int MPI_threadingModelRequested = MPI_THREAD_FUNNELED;
//int MPI_threadingModelRequested = MPI_THREAD_MULTIPLE;
#ifdef __MPI_THREAD_MULTIPLE
MPI_threadingModelRequested = MPI_THREAD_MULTIPLE;
#endif
MPI_Init_thread(&argc, &argv, MPI_threadingModelRequested, &MPI_threadingModel);
MPI_Comm_size(MPI_COMM_WORLD, &MPI_Tasks);
MPI_Comm_rank(MPI_COMM_WORLD, &MPI_Rank);
if(MPI_threadingModel>MPI_threadingModelRequested)MPI_threadingModel=MPI_threadingModelRequested;
if(MPI_Rank==0){
if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, ");
else if(MPI_threadingModelRequested == MPI_THREAD_SINGLE )printf("Requested MPI_THREAD_SINGLE, ");
else if(MPI_threadingModelRequested == MPI_THREAD_FUNNELED )printf("Requested MPI_THREAD_FUNNELED, ");
else if(MPI_threadingModelRequested == MPI_THREAD_SERIALIZED)printf("Requested MPI_THREAD_SERIALIZED, ");
else if(MPI_threadingModelRequested == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, ");
else printf("Requested Unknown MPI Threading Model (%d), ",MPI_threadingModelRequested);
if(MPI_threadingModel == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n");
else if(MPI_threadingModel == MPI_THREAD_SINGLE )printf("got MPI_THREAD_SINGLE\n");
else if(MPI_threadingModel == MPI_THREAD_FUNNELED )printf("got MPI_THREAD_FUNNELED\n");
else if(MPI_threadingModel == MPI_THREAD_SERIALIZED)printf("got MPI_THREAD_SERIALIZED\n");
else if(MPI_threadingModel == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n");
else printf("got Unknown MPI Threading Model (%d)\n",MPI_threadingModel);
fflush(stdout); }
#ifdef __MPI_THREAD_MULTIPLE
if( (MPI_threadingModelRequested == MPI_THREAD_MULTIPLE) && (MPI_threadingModel != MPI_THREAD_MULTIPLE) ){MPI_Finalize();exit(0);}
#endif
#endif
int log2_subdomain_dim = 6;
int subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim);
int subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim);
int subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim);
int ranks_in_i=1;
int ranks_in_j=1;
int ranks_in_k=1;
if(argc==2){
log2_subdomain_dim=atoi(argv[1]);
subdomains_per_rank_in_i=256 / (1<<log2_subdomain_dim);
subdomains_per_rank_in_j=256 / (1<<log2_subdomain_dim);
subdomains_per_rank_in_k=256 / (1<<log2_subdomain_dim);
}else if(argc==5){
log2_subdomain_dim=atoi(argv[1]);
subdomains_per_rank_in_i=atoi(argv[2]);
subdomains_per_rank_in_j=atoi(argv[3]);
subdomains_per_rank_in_k=atoi(argv[4]);
}else if(argc==8){
log2_subdomain_dim=atoi(argv[1]);
subdomains_per_rank_in_i=atoi(argv[2]);
subdomains_per_rank_in_j=atoi(argv[3]);
subdomains_per_rank_in_k=atoi(argv[4]);
ranks_in_i=atoi(argv[5]);
ranks_in_j=atoi(argv[6]);
ranks_in_k=atoi(argv[7]);
}else if(argc!=1){
if(MPI_Rank==0){printf("usage: ./a.out [log2_subdomain_dim] [subdomains per rank in i,j,k] [ranks in i,j,k]\n");}
#ifdef __MPI
MPI_Finalize();
#endif
exit(0);
}
/*
if(log2_subdomain_dim>7){
if(MPI_Rank==0){printf("error, log2_subdomain_dim(%d)>7\n",log2_subdomain_dim);}
#ifdef __MPI
MPI_Finalize();
#endif
exit(0);
}
*/
if(ranks_in_i*ranks_in_j*ranks_in_k != MPI_Tasks){
if(MPI_Rank==0){printf("error, ranks_in_i*ranks_in_j*ranks_in_k(%d*%d*%d=%d) != MPI_Tasks(%d)\n",ranks_in_i,ranks_in_j,ranks_in_k,ranks_in_i*ranks_in_j*ranks_in_k,MPI_Tasks);}
#ifdef __MPI
MPI_Finalize();
#endif
exit(0);
}
if(MPI_Rank==0)printf("%d MPI Tasks of %d threads\n",MPI_Tasks,OMP_Threads);
int subdomain_dim_i=1<<log2_subdomain_dim;
int subdomain_dim_j=1<<log2_subdomain_dim;
int subdomain_dim_k=1<<log2_subdomain_dim;
// fine dim = 128 64 32 16 8 4
// levels = 6 5 4 3 2 1
//int log2_coarse_dim = 2; // i.e. coarsen to 4^3
int log2_coarse_dim = 1; // i.e. coarsen to 2^3
int levels_in_vcycle=1+log2_subdomain_dim-log2_coarse_dim; // ie 1+log2(fine grid size)-log2(bottom grid size)
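// e.g. with the defaults log2_subdomain_dim=6 and log2_coarse_dim=1 this gives
// 1+6-1 = 6 levels, i.e. each 64^3 subdomain is coarsened down to 2^3.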
if(MPI_Rank==0){printf("truncating the v-cycle at %d^3 subdomains\n",1<<log2_coarse_dim);fflush(stdout);}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
int box;
domain_type domain;
int boundary_conditions[3] = {__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC,__BOUNDARY_PERIODIC}; // i-, j-, and k-directions
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
create_domain(&domain,
subdomain_dim_i,subdomain_dim_j,subdomain_dim_k,
subdomains_per_rank_in_i,subdomains_per_rank_in_j,subdomains_per_rank_in_k,
ranks_in_i,ranks_in_j,ranks_in_k,
MPI_Rank,
boundary_conditions,
__NumGrids,1,levels_in_vcycle);
double h0=1.0/((double)(domain.dim.i));
if(MPI_Rank==0){printf("initializing alpha, beta, RHS for the ``hard problem''...");fflush(stdout);}
double a=0.9; // i.e. good Helmholtz
double b=0.9;
initialize_problem(&domain,0,h0,a,b);
if(MPI_Rank==0){printf("done\n");fflush(stdout);}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
MGBuild(&domain,a,b,h0); // restrictions, dominant eigenvalue, etc...
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
int s,sMax=2;
#ifdef __MPI
sMax=4;
#endif
//Make an initial guess for u (=0)... Solve Lu=f to a precision of 1e-15... print the benchmark timing results...
MGResetTimers(&domain);for(s=0;s<sMax;s++){zero_grid(&domain,0,__u); MGSolve(&domain,__u,__f,a,b,1e-15);}print_timing(&domain);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// calculate error...
double h3 = h0*h0*h0;
add_grids(&domain,0,__temp,1.0,__u_exact,-1.0,__u); // __temp = __u_exact - __u
double max = norm(&domain,0,__temp); // max norm of error function
double error = sqrt( dot(&domain,0,__temp,__temp)*h3); // normalized L2 error ?
if(MPI_Rank==0){printf("Error test: h = %e, max = %e\n",h0,max);}
if(MPI_Rank==0){printf("Error test: h = %e, L2 = %e\n",h0,error);}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
destroy_domain(&domain);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#ifdef __MPI
MPI_Finalize();
#endif
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return(0);
}
|
smacof_helper.c | /******************************************************************************
* Copyright (c) 2015 - 2016 Philipp Schubert. *
* All rights reserved. This program and the accompanying materials are made *
* available under the terms of LICENSE.txt. *
* *
* Contributors: *
* Philipp Schubert *
*****************************************************************************/
/**
 * @file smacof_helper.c
 * @brief Implements the function prototypes from smacof_helper.h for the
 * functions required by the SMACOF algorithm.
 *
 * This file contains the implementations of the function prototypes of the
 * functions needed to formulate the SMACOF algorithm.
 *
 * @author Philipp D. Schubert
 * @bug No known bugs.
 */
#include "smacof_helper.h"
#include "cuda_disfuncs.cuh"
#include "cuda_linalg.cuh"
#include "cuda_smacof_helper.cuh"
#include "disfuncs.h"
#include "linalg.h"
#include "m.h"
#include "tm.h"
#include "utils.h"
#include <math.h>
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
m_t *guttmanTransformation(const tm_t *delta, const m_t *z, const tm_t *w,
const tm_t *pinv) {
m_t *update;
const real_t norm = 1.0 / delta->size;
// OPENMP C version of guttman-transformation
tm_t *dz = calcDeltaMatrix(z, EUCLIDEAN, 2);
tm_t *bz = computeBofZ(delta, dz, w);
m_t *bztimesz = TMmultM(bz, z);
if (w == NULL) {
// formula in 'linear' math: X_update = n^-1 * B(Z) * Z
update = Mmultscalar(bztimesz, norm);
} else {
update = TMmultM(pinv, bztimesz);
}
// cleanup temporary stuff
freeTM(dz);
dz = NULL;
freeTM(bz);
bz = NULL;
freeM(bztimesz);
bztimesz = NULL;
return update;
}
m_t *guttmanTransformation_nomem(const tm_t *delta, m_t *z, m_t *x,
const tm_t *w, const tm_t *pinv,
const tm_t *dz, tm_t *bz) {
const real_t norm = 1.0 / delta->size;
// OPENMP C version of guttman-transformation
computeBofZ_nomem(delta, dz, w, bz);
TMmultM_nomem(bz, z, x);
if (w == NULL) {
// formula in 'linear' math: X_update = n^-1 * B(Z) * Z
Mmultscalar_nomem(x, norm);
return x;
} else {
// formula in 'linear' math: X_update = V+ * B(Z) * Z
TMmultM_nomem(pinv, x, z);
memcpy(x->elems, z->elems, z->num_elems * sizeof(real_t));
return x;
}
}
tm_t *computeV(const tm_t *w) {
tm_t *v = initTM(w->size);
unsigned int i, j;
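// tm_t appears to store a symmetric matrix in packed lower-triangular form:
// entry (i,j) with j <= i lives at index i*(i+1)/2 + j; for j > i the code below
// reads the mirrored entry (j,i) from the same packed storage.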
#pragma omp parallel for shared(w, v) private(i) schedule(static)
for (i = 0; i < w->num_elems; ++i) {
v->elems[i] = -w->elems[i];
}
real_t sum = 0;
#pragma omp parallel for shared(v) private(i, j, sum) schedule(dynamic)
for (i = 0; i < v->size; ++i) {
sum = 0;
for (j = 0; j < v->size; ++j) {
sum += (j <= i) ? v->elems[(i * (i + 1)) / 2 + j]
: v->elems[(j * (j + 1)) / 2 + i];
}
v->elems[(i * (i + 1)) / 2 + i] = -sum;
}
return v;
}
real_t computesigma(const m_t *x, const tm_t *delta, const tm_t *w) {
real_t sigma = 0;
unsigned int i;
tm_t *dx = calcDeltaMatrix(x, EUCLIDEAN, 2);
CERROR(dx->size != delta->size, "bad dimension to compute sigma");
if (w == NULL) {
#pragma omp parallel for shared(delta,dx) private(i) reduction(+:sigma) schedule(static)
for (i = 0; i < delta->num_elems; ++i) {
sigma += powf(delta->elems[i] - dx->elems[i], 2);
}
} // if we have weights, we have to use a more advanced formula
else {
#pragma omp parallel for shared(delta,dx,w) private(i) reduction(+:sigma) schedule(static)
for (i = 0; i < delta->num_elems; ++i) {
sigma += w->elems[i] * powf(delta->elems[i] - dx->elems[i], 2);
}
}
// cleanup temporary stuff
freeTM(dx);
dx = NULL;
return sigma;
}
real_t computesigma_nomem(const tm_t *dx, const tm_t *delta, const tm_t *w) {
real_t sigma = 0;
unsigned int i;
CERROR(dx->size != delta->size, "bad dimension to compute sigma");
if (w == NULL) {
#pragma omp parallel for shared(delta,dx) private(i) reduction(+:sigma) schedule(static)
for (i = 0; i < delta->num_elems; ++i) {
sigma += powf(delta->elems[i] - dx->elems[i], 2);
}
} // if we have weights, we have to use a more advanced formula
else {
#pragma omp parallel for shared(delta,dx,w) private(i) reduction(+:sigma) schedule(static)
for (i = 0; i < delta->num_elems; ++i) {
sigma += w->elems[i] * powf(delta->elems[i] - dx->elems[i], 2);
}
}
return sigma;
}
tm_t *computeBofZ(const tm_t *delta, const tm_t *dz, const tm_t *w) {
tm_t *b = initTM(delta->size);
unsigned int i, j;
if (w == NULL) {
#pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
for (i = 0; i < b->size; ++i) {
for (j = 0; j < i; ++j) {
b->elems[(i * (i + 1) / 2) + j] =
(dz->elems[(i * (i + 1) / 2) + j] != 0)
? -(delta->elems[(i * (i + 1) / 2) + j] /
dz->elems[(i * (i + 1) / 2) + j])
: 0.0;
}
}
} else {
#pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
for (i = 0; i < b->size; ++i) {
for (j = 0; j < i; ++j) {
b->elems[(i * (i + 1) / 2) + j] =
(dz->elems[(i * (i + 1) / 2) + j] != 0)
? -((w->elems[(i * (i + 1) / 2) + j] *
delta->elems[(i * (i + 1) / 2) + j]) /
dz->elems[(i * (i + 1) / 2) + j])
: 0.0;
}
}
}
// Caution: diagonal entries have to be calculated last!
real_t sumcol;
#pragma omp parallel for shared(b) private(i, j, sumcol) schedule(dynamic)
for (i = 0; i < b->size; ++i) {
sumcol = 0;
for (j = 0; j < b->size; ++j) {
if (j < i)
sumcol += b->elems[(i * (i + 1) / 2) + j];
else
sumcol += b->elems[(j * (j + 1) / 2) + i];
}
b->elems[(i * (i + 1) / 2) + i] = -sumcol;
}
return b;
}
void computeBofZ_nomem(const tm_t *delta, const tm_t *dz, const tm_t *w,
tm_t *b) {
unsigned int i, j;
if (w == NULL) {
#pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
for (i = 0; i < b->size; ++i) {
for (j = 0; j < i; ++j) {
b->elems[(i * (i + 1) / 2) + j] =
(dz->elems[(i * (i + 1) / 2) + j] != 0)
? -(delta->elems[(i * (i + 1) / 2) + j] /
dz->elems[(i * (i + 1) / 2) + j])
: 0.0;
}
}
} else {
#pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
for (i = 0; i < b->size; ++i) {
for (j = 0; j < i; ++j) {
b->elems[(i * (i + 1) / 2) + j] =
(dz->elems[(i * (i + 1) / 2) + j] != 0)
? -((w->elems[(i * (i + 1) / 2) + j] *
delta->elems[(i * (i + 1) / 2) + j]) /
dz->elems[(i * (i + 1) / 2) + j])
: 0.0;
}
}
}
// Caution: diagonal entries have to be calculated last!
real_t sumcol;
#pragma omp parallel for shared(b) private(i, j, sumcol) schedule(dynamic)
for (i = 0; i < b->size; ++i) {
sumcol = 0;
for (j = 0; j < b->size; ++j) {
if (i == j) {
} else if (j < i) {
sumcol += b->elems[(i * (i + 1) / 2) + j];
} else {
sumcol += b->elems[(j * (j + 1) / 2) + i];
}
}
b->elems[(i * (i + 1) / 2) + i] = -sumcol;
}
}
m_t *generateRandM(const unsigned int rows, const unsigned int cols,
const unsigned int min, const unsigned int max) {
m_t *mrand = initM(rows, cols);
// initialize the random generator (never try something parallel with c random
// functions!)
#if SEEDED == 1
srand(time(NULL));
#endif
for (unsigned int i = 0; i < mrand->num_elems; ++i) {
mrand->elems[i] = min + rand() / (RAND_MAX / (max - min + 1) + 1);
}
return mrand;
}
tm_t *convertMtoTM(const m_t *tmp_w) {
CERROR(tmp_w->rows != tmp_w->cols,
"weight matrix of type m has wrong dimensions");
tm_t *w = initTM(tmp_w->rows);
unsigned int i, j;
#pragma omp parallel for shared(w, tmp_w) private(i, j) schedule(dynamic)
for (i = 0; i < tmp_w->rows; ++i) {
for (j = 0; j < i; ++j) {
w->elems[(i * (i + 1)) / 2 + j] = tmp_w->elems[i * tmp_w->cols + j];
}
}
return w;
}
tm_t *get_test_delta() {
tm_t *test_delta = initTM(4);
test_delta->elems[0] = 0;
test_delta->elems[1] = 5;
test_delta->elems[2] = 0;
test_delta->elems[3] = 3;
test_delta->elems[4] = 2;
test_delta->elems[5] = 0;
test_delta->elems[6] = 4;
test_delta->elems[7] = 2;
test_delta->elems[8] = 1;
test_delta->elems[9] = 0;
return test_delta;
}
m_t *get_test_x() {
m_t *test_x = initM(4, 2);
test_x->elems[0] = -0.266;
test_x->elems[1] = -0.539;
test_x->elems[2] = 0.451;
test_x->elems[3] = 0.252;
test_x->elems[4] = 0.016;
test_x->elems[5] = -0.238;
test_x->elems[6] = -0.200;
test_x->elems[7] = 0.524;
return test_x;
}
|
pbkdf2-hmac-md5_fmt_plug.c | /*
* This software is Copyright (c) 2015 Dhiru and magnum
* and it is hereby released to
* the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_md5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_md5);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include "arch.h"
//#undef SIMD_COEF_32
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "pbkdf2_hmac_md5.h"
#include "pbkdf2_hmac_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PBKDF2-HMAC-MD5"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-MD5 " MD5_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-MD5 32/" ARCH_BITS_STR
#endif
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#if SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_MD5)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define PLAINTEXT_LENGTH 125
static struct custom_salt {
unsigned int length;
unsigned int rounds;
char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_MDx_BINARY_SIZE / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
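// The two scalings below multiply min_keys_per_crypt by the thread count and
// max_keys_per_crypt by thread count * OMP_SCALE, so each OpenMP thread gets a
// large batch of candidates per crypt_all() call.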
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *p;
int saltlen;
memset(&cs, 0, sizeof(cs));
if (!strncmp(ciphertext, PBKDF2_MD5_FORMAT_TAG, PBKDF2_MD5_TAG_LEN))
ciphertext += PBKDF2_MD5_TAG_LEN;
cs.rounds = atoi(ciphertext);
ciphertext = strchr(ciphertext, '$') + 1;
p = strchr(ciphertext, '$');
saltlen = 0;
memset(cs.salt, 0, sizeof(cs.salt));
while (ciphertext < p) { /** extract salt **/
cs.salt[saltlen++] =
atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
atoi16[ARCH_INDEX(ciphertext[1])];
ciphertext += 2;
}
cs.length = saltlen;
return (void*)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#if SIMD_COEF_32
int lens[SSE_GROUP_SZ_MD5], i;
unsigned char *pin[SSE_GROUP_SZ_MD5];
union {
uint32_t *pout[SSE_GROUP_SZ_MD5];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_MD5; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = crypt_out[index+i];
}
pbkdf2_md5_sse((const unsigned char **)pin, lens,
(unsigned char*)cur_salt->salt, cur_salt->length,
cur_salt->rounds, &(x.poutc),
PBKDF2_MDx_BINARY_SIZE, 0);
#else
pbkdf2_md5((unsigned char*)(saved_key[index]),
strlen(saved_key[index]),
(unsigned char*)cur_salt->salt, cur_salt->length,
cur_salt->rounds, (unsigned char*)crypt_out[index],
PBKDF2_MDx_BINARY_SIZE, 0);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
//dump_stuff_msg("\nbinary", crypt_out[count - 1], 16);
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], PBKDF2_MDx_BINARY_SIZE);
}
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
return saved_key[index];
}
static int cmp_exact(char *source, int index)
{
return pbkdf2_hmac_md5_cmp_exact(get_key(index), source, (unsigned char*)cur_salt->salt, cur_salt->length, cur_salt->rounds);
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->rounds;
}
struct fmt_main fmt_pbkdf2_hmac_md5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
PBKDF2_MDx_BINARY_SIZE,
PBKDF2_32_BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ PBKDF2_MD5_FORMAT_TAG },
pbkdf2_hmac_md5_common_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
pbkdf2_hmac_md5_valid,
pbkdf2_hmac_md5_split,
pbkdf2_hmac_md5_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
test.c | #include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
// enable tests
#define CHECK 1
#define DEBUG 0
#define N (992)
#define INIT() INIT_LOOP(N, {A[i] = 0; C[i] = 1; D[i] = i; E[i] = -i;})
int main(void){
#if CHECK
check_offloading();
#endif
/*
* Default device
*/
printf("Is%s initial device\n", omp_is_initial_device() ? "" : " not");
printf("Initial device: %d\n", omp_get_initial_device());
omp_set_default_device(1);
printf("Default device before task: %d\n", omp_get_default_device());
#pragma omp task
{
printf("Default device inside task: %d\n", omp_get_default_device());
omp_set_default_device(2);
printf("Default device inside task after resetting: %d\n",
omp_get_default_device());
}
#pragma omp taskwait
printf("Default device outside task: %d\n", omp_get_default_device());
// the default device can be set to anything; if the target region fails, execution falls back to the host
const int default_device = 3;
omp_set_default_device(default_device);
// default device for omp target call MUST be >= 0 and <omp_get_num_devices() or
// the initial device. So when there are no devices, it must be the initial device
int default_device_omp_target_call = default_device;
if (omp_get_num_devices() == 0) {
default_device_omp_target_call = omp_get_initial_device();
}
#if DEBUG
printf("test on machine with %d devices\n", omp_get_num_devices());
#endif
/*
* Target alloc & target memcpy
*/
double A[N], B[N], C[N], D[N], E[N];
double *pA, *pB, *pC, *pD, *pE;
// map ptrs
pA = &A[0];
pB = &B[0];
pC = &C[0];
pD = &D[0];
pE = &E[0];
INIT();
pA = pA - 10;
pC = pC - 20;
pD = pD - 30;
void *device_A = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
void *device_C = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
void *device_D = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
double *dpA = (double *) device_A - 100;
double *dpC = (double *) device_C - 200;
double *dpD = (double *) device_D - 300;
printf("omp_target_alloc %s\n", device_A && device_C && device_D ?
"succeeded" : "failed");
omp_target_memcpy(dpC, pC, N*sizeof(double), 200*sizeof(double),
20*sizeof(double), default_device_omp_target_call, omp_get_initial_device());
omp_target_memcpy(dpD, pD, N*sizeof(double), 300*sizeof(double),
30*sizeof(double), default_device_omp_target_call, omp_get_initial_device());
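// omp_target_memcpy(dst, src, length, dst_offset, src_offset, dst_device, src_device):
// the byte offsets (200/20 and 300/30 doubles) cancel the pointer adjustments made
// above, so the full host arrays C and D land at the start of device_C and device_D.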
#pragma omp target is_device_ptr(dpA, dpC, dpD) device(default_device)
{
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < 992; i++) {
dpA[i+100] = dpC[i+200] + dpD[i+300] + 1;
}
}
omp_target_memcpy(pA, dpA, N*sizeof(double), 10*sizeof(double),
100*sizeof(double), omp_get_initial_device(), default_device_omp_target_call);
int fail = 0;
VERIFY(0, N, A[i], (double)(i+2));
if (fail) {
printf ("Test omp_target_memcpy: Failed\n");
} else {
printf ("Test omp_target_memcpy: Succeeded\n");
}
/*
* target_is_present and target_associate/disassociate_ptr
*/
INIT();
if (offloading_disabled()) {
// If offloading is disabled just recreate the messages so that this can
// also be tested with no device.
printf("C is not present, associating it...\n");
printf("omp_target_associate_ptr C %s\n", 1 ? "succeeded" : "failed");
} else if (!omp_target_is_present(C, default_device_omp_target_call)) {
printf("C is not present, associating it...\n");
int rc = omp_target_associate_ptr(C, dpC, N*sizeof(double),
200*sizeof(double), default_device_omp_target_call);
printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
if (offloading_disabled()) {
// If offloading is disabled just recreate the messages so that this can
// also be tested with no device.
printf("D is not present, associating it...\n");
printf("omp_target_associate_ptr D %s\n", 1 ? "succeeded" : "failed");
} else if (!omp_target_is_present(D, default_device_omp_target_call)) {
printf("D is not present, associating it...\n");
int rc = omp_target_associate_ptr(D, dpD, N*sizeof(double),
300*sizeof(double), default_device_omp_target_call);
printf("omp_target_associate_ptr D %s\n", !rc ? "succeeded" : "failed");
}
#pragma omp target data map(from: C, D) device(default_device)
{
printf("Inside target data: A is%s present\n",
(omp_target_is_present(A, default_device_omp_target_call) && !offloading_disabled()) ? "" : " not");
printf("Inside target data: C is%s present\n",
omp_target_is_present(C, default_device_omp_target_call) ? "" : " not");
printf("Inside target data: D is%s present\n",
omp_target_is_present(D, default_device_omp_target_call) ? "" : " not");
// C and D are mapped "from", so there is no copy from host to device.
// If the association was successful, their corresponding device arrays
// are already populated from previous omp_target_memcpy with the correct
// values and the following target for-loop must yield the correct results.
#pragma omp target map(from: A) device(default_device)
{
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < 992; i++)
A[i] = C[i] + D[i] + 1;
}
}
if (offloading_disabled()) {
printf("C is present, disassociating it...\n");
printf("omp_target_disassociate_ptr C %s\n", 1 ? "succeeded" : "failed");
} else if (omp_target_is_present(C, default_device_omp_target_call)) {
printf("C is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(C, default_device_omp_target_call);
printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
if (offloading_disabled()) {
printf("D is present, disassociating it...\n");
printf("omp_target_disassociate_ptr D %s\n", 1 ? "succeeded" : "failed");
} else if (omp_target_is_present(D, default_device_omp_target_call)) {
printf("D is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(D, default_device_omp_target_call);
printf("omp_target_disassociate_ptr D %s\n", !rc ? "succeeded" : "failed");
}
fail = 0;
VERIFY(0, N, A[i], (double)(i+2));
if (fail) {
printf ("Test omp_target_associate_ptr: Failed\n");
} else {
printf ("Test omp_target_associate_ptr: Succeeded\n");
}
omp_target_free(device_A, default_device_omp_target_call);
omp_target_free(device_C, default_device_omp_target_call);
omp_target_free(device_D, default_device_omp_target_call);
return 0;
}
|
DRACC_OMP_034_MxV_wrong_update_yes.c | /*
Matrix Vector multiplication with another thread on the host constantly updating the values of a.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <omp.h>
#define C 512
int *a;
int *b;
int *c;
int init(){
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
b[j+i*C]=1;
}
a[i]=1;
c[i]=0;
}
return 0;
}
int Mult(){
#pragma omp parallel sections num_threads(2)
{
#pragma omp target map(to:a[0:C],b[0:C*C]) map(from:c[0:C]) device(0)
{
#pragma omp teams distribute parallel for
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
c[i]+=b[j+i*C]*a[j];
}
}
}
#pragma omp section
#pragma omp parallel num_threads(4)
#pragma omp for
for(int i=C; i>0; i--){
a[i]=3;
#pragma omp target update to(a[i]) device(0)
}
}
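// The two sections above run concurrently: the target region reads a[] on the
// device while the host loop rewrites a[] and pushes single elements back with
// 'target update to', so c[] can mix old and new values of a -- the defect this
// benchmark is designed to expose (reported by check()).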
return 0;
}
int check(){
bool test = false;
for(int i=0; i<C; i++){
if(c[i]!=C){
test = true;
}
}
printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
return 0;
}
int main(){
a = malloc(C*sizeof(int));
b = malloc(C*C*sizeof(int));
c = malloc(C*sizeof(int));
init();
omp_set_nested(1);
Mult();
check();
free(a);
free(b);
free(c);
return 0;
} |
convolution_sgemm_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_packn_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
// Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u * packn, packn, opt.workspace_allocator);
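// Permute the im2col data into tiles of 8, 4, 2 and finally single columns so that
// the GEMM loops further below can read each tile contiguously.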
{
int remain_size_start = 0;
int nn_size = size >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr[4] = img0[l + packn * 4];
tmpptr[5] = img0[l + packn * 5];
tmpptr[6] = img0[l + packn * 6];
tmpptr[7] = img0[l + packn * 7];
tmpptr += 8;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(img0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(img0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(img0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(img0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
img0 += size * packn;
tmpptr += packn * 8;
#endif
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr += 4;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
img0 += size * packn;
tmpptr += packn * 4;
#endif
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr += 2;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
img0 += size * packn;
tmpptr += packn * 2;
#endif
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
vfloat32m1_t _val = vle32_v_f32m1(img0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
img0 += size * packn;
tmpptr += packn;
}
}
}
}
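    // sgemm: for each output channel, multiply the repacked input tiles
    // (8/4/2/1 columns at a time) by the packed kernel, accumulating packn
    // outputs per column and adding the bias when present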
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
if (bias)
{
_sum0 = vle32_v_f32m1(bias + p * packn, vl);
_sum1 = vle32_v_f32m1(bias + p * packn, vl);
_sum2 = vle32_v_f32m1(bias + p * packn, vl);
_sum3 = vle32_v_f32m1(bias + p * packn, vl);
_sum4 = vle32_v_f32m1(bias + p * packn, vl);
_sum5 = vle32_v_f32m1(bias + p * packn, vl);
_sum6 = vle32_v_f32m1(bias + p * packn, vl);
_sum7 = vle32_v_f32m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
float val2 = *tmpptr++;
float val3 = *tmpptr++;
float val4 = *tmpptr++;
float val5 = *tmpptr++;
float val6 = *tmpptr++;
float val7 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
kptr0 += packn;
}
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr0 + packn, _sum1, vl);
vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl);
vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl);
vse32_v_f32m1(outptr0 + packn * 4, _sum4, vl);
vse32_v_f32m1(outptr0 + packn * 5, _sum5, vl);
vse32_v_f32m1(outptr0 + packn * 6, _sum6, vl);
vse32_v_f32m1(outptr0 + packn * 7, _sum7, vl);
outptr0 += packn * 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
if (bias)
{
_sum0 = vle32_v_f32m1(bias + p * packn, vl);
_sum1 = vle32_v_f32m1(bias + p * packn, vl);
_sum2 = vle32_v_f32m1(bias + p * packn, vl);
_sum3 = vle32_v_f32m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
float val2 = *tmpptr++;
float val3 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
kptr0 += packn;
}
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr0 + packn, _sum1, vl);
vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl);
vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl);
outptr0 += packn * 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
if (bias)
{
_sum0 = vle32_v_f32m1(bias + p * packn, vl);
_sum1 = vle32_v_f32m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
kptr0 += packn;
}
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr0 + packn, _sum1, vl);
outptr0 += packn * 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
if (bias)
{
_sum = vle32_v_f32m1(bias + p * packn, vl);
}
for (int j = 0; j < nn; j++)
{
float val = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
kptr0 += packn;
}
vse32_v_f32m1(outptr0, _sum, vl);
outptr0 += packn;
}
}
}
static void convolution_im2col_sgemm_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
{
const int gap = (w * stride_h - outw * stride_w) * packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
float* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * packn;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
vse32_v_f32m1(ptr, _val, vl);
sptr += stride_w * packn;
ptr += packn;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_packn_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014-2022 by XGBoost Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"
#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
#include "../common/threading_utils.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker : public TreeUpdater {
public:
explicit BaseMaker(GenericParameter const *ctx) : TreeUpdater(ctx) {}
void Configure(const Args &args) override { param_.UpdateAllowUnknown(args); }
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("train_param"), &this->param_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["train_param"] = ToJson(param_);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
fminmax_.resize(tree.param.num_feature * 2);
std::fill(fminmax_.begin(), fminmax_.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
auto page = batch.GetView();
for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
auto c = page[fid];
if (c.size() != 0) {
CHECK_LT(fid * 2, fminmax_.size());
fminmax_[fid * 2 + 0] =
std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
fminmax_[fid * 2 + 1] =
std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
}
}
}
}
/*! \brief synchronize the information */
inline void SyncInfo() {
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax_.size())
<< "FeatHelper fid exceed query bound ";
bst_float a = fminmax_[fid * 2];
bst_float b = fminmax_[fid * 2 + 1];
if (a == -std::numeric_limits<bst_float>::max()) return 0;
if (-a == b) {
return 1;
} else {
return 2;
}
}
bst_float MaxValue(bst_uint fid) const {
return fminmax_[fid *2 + 1];
}
void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
std::vector<bst_feature_t> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax_.size(); i += 2) {
const auto fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
auto n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// sync the findex if it is subsample
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
std::vector<bst_float> fminmax_;
};
// ------static helper functions ------
// helper function to get to next level of the tree
  /*! \brief this is a helper function for row-based data */
inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.SplitIndex();
for (const auto& ins : inst) {
if (findex == ins.index) {
if (ins.fvalue < n.SplitCond()) {
return n.LeftChild();
} else {
return n.RightChild();
}
}
}
return n.DefaultChild();
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
inline void InitData(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
{
// setup position
position_.resize(gpair.size());
std::fill(position_.begin(), position_.end(), 0);
      // mark deleted entries (instances with negative hessian)
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
}
// mark subsample
if (param_.subsample < 1.0f) {
CHECK_EQ(param_.sampling_method, TrainParam::kUniform)
<< "Only uniform sampling is supported, "
<< "gradient-based sampling is only support by GPU Hist.";
std::bernoulli_distribution coin_flip(param_.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) continue;
if (!coin_flip(rnd)) position_[i] = ~position_[i];
}
}
}
{
// expand query
qexpand_.reserve(256); qexpand_.clear();
qexpand_.push_back(0);
this->UpdateNode2WorkIndex(tree);
}
this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
}
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (int nid : qexpand_) {
if (!tree[nid].IsLeaf()) {
newnodes.push_back(tree[nid].LeftChild());
newnodes.push_back(tree[nid].RightChild());
}
}
// use new nodes for qexpand
qexpand_ = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
inline int DecodePosition(bst_uint ridx) const {
const int pid = position_[ridx];
return pid < 0 ? ~pid : pid;
}
// encode the encoded position value for ridx
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position_[ridx] < 0) {
position_[ridx] = ~nid;
} else {
position_[ridx] = nid;
}
}
/*!
* \brief This is a helper function that uses a column based data structure
   * and resets the positions to the latest one
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
this->SetDefaultPostion(p_fmat, tree);
}
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// set default direct nodes to default
    // for leaf nodes that are not fresh, mark them as ~nid,
// so that they are ignored in future statistics collection
common::ParallelFor(p_fmat->Info().num_row_, ctx_->Threads(), [&](auto ridx) {
const int nid = this->DecodePosition(ridx);
if (tree[nid].IsLeaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].RightChild() == -1) {
position_[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].DefaultLeft()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
});
}
/*!
   * \brief this is a helper function that uses a column-based data structure
   * to CORRECT the positions of non-default directions that WERE set to default
* before calling this function.
* \param batch The column batch
* \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure
*/
inline void CorrectNonDefaultPositionByBatch(
const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
const RegTree &tree) {
auto page = batch.GetView();
for (size_t fid = 0; fid < batch.Size(); ++fid) {
auto col = page[fid];
auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
if (it != sorted_split_set.end() && *it == fid) {
common::ParallelFor(col.size(), ctx_->Threads(), [&](auto j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
CHECK(tree[nid].IsLeaf());
int pid = tree[nid].Parent();
// go back to parent, correct those who are not default
if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
if (fvalue < tree[pid].SplitCond()) {
this->SetEncodePosition(ridx, tree[pid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[pid].RightChild());
}
}
});
}
}
}
/*!
   * \brief this is a helper function that uses a column-based data structure
* \param nodes the set of nodes that contains the split to be used
* \param tree the regression tree structure
* \param out_split_set The split index set
*/
inline void GetSplitSet(const std::vector<int> &nodes,
const RegTree &tree,
std::vector<unsigned>* out_split_set) {
std::vector<unsigned>& fsplits = *out_split_set;
fsplits.clear();
// step 1, classify the non-default data into right places
for (int nid : nodes) {
if (!tree[nid].IsLeaf()) {
fsplits.push_back(tree[nid].SplitIndex());
}
}
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
}
/*!
   * \brief this is a helper function that uses a column-based data structure to
   * update all positions into the non-default branch, if any, ignoring the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
auto page = batch.GetView();
for (auto fid : fsplits) {
auto col = page[fid];
common::ParallelFor(col.size(), ctx_->Threads(), [&](auto j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
if (fvalue < tree[nid].SplitCond()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
});
}
}
}
/*! \brief helper function to get statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
thread_temp.resize(ctx_->Threads());
p_node_stats->resize(tree.param.num_nodes);
dmlc::OMPException exc;
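    // each thread allocates its own statistics buffer and resets the entries
    // for nodes currently in the expand queue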
#pragma omp parallel num_threads(ctx_->Threads())
{
exc.Run([&]() {
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats());
for (unsigned int nid : qexpand_) {
thread_temp[tid][nid] = TStats();
}
});
}
exc.Rethrow();
// setup position
common::ParallelFor(fmat.Info().num_row_, ctx_->Threads(), [&](auto ridx) {
const int nid = position_[ridx];
const int tid = omp_get_thread_num();
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair[ridx]);
}
});
// sum the per thread statistics together
for (int nid : qexpand_) {
TStats &s = (*p_node_stats)[nid];
s = TStats();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
using SketchEntry = common::SortedQuantile;
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
   * \brief map active node to its working index offset in qexpand,
   * can be -1, which means the node is not actively expanding
*/
std::vector<int> node2workindex_;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position_;
FeatureInteractionConstraintHost interaction_constraints_;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
// update the node2workindex
std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
node2workindex_.resize(tree.param.num_nodes);
for (size_t i = 0; i < qexpand_.size(); ++i) {
node2workindex_[qexpand_[i]] = static_cast<int>(i);
}
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd foo
void test_no_clause() {
int i;
#pragma omp parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}}
#pragma omp parallel for simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
#pragma omp parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd collapse(2)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp parallel for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp parallel for simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp parallel for simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp parallel for simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp parallel for simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp parallel for simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp parallel for simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
void test_aligned() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
tree-pretty-print.c | /* Pretty formatting of GENERIC trees in C syntax.
Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011 Free Software Foundation, Inc.
Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "output.h"
#include "tree-pretty-print.h"
#include "hashtab.h"
#include "tree-flow.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "tree-chrec.h"
#include "tree-pass.h"
#include "value-prof.h"
#include "predict.h"
/* Local functions, macros and variables. */
static const char *op_symbol (const_tree);
static void pretty_print_string (pretty_printer *, const char*);
static void newline_and_indent (pretty_printer *, int);
static void maybe_init_pretty_print (FILE *);
static void print_struct_decl (pretty_printer *, const_tree, int, int);
static void do_niy (pretty_printer *, const_tree);
#define INDENT(SPACE) do { \
int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0)
#define NIY do_niy(buffer,node)
static pretty_printer buffer;
static int initialized = 0;
/* Try to print something for an unknown tree code. */
static void
do_niy (pretty_printer *buffer, const_tree node)
{
int i, len;
pp_string (buffer, "<<< Unknown tree: ");
pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]);
if (EXPR_P (node))
{
len = TREE_OPERAND_LENGTH (node);
for (i = 0; i < len; ++i)
{
newline_and_indent (buffer, 2);
dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false);
}
}
pp_string (buffer, " >>>");
}
/* Debugging function to print out a generic expression. */
DEBUG_FUNCTION void
debug_generic_expr (tree t)
{
print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS);
fprintf (stderr, "\n");
}
/* Debugging function to print out a generic statement. */
DEBUG_FUNCTION void
debug_generic_stmt (tree t)
{
print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS);
fprintf (stderr, "\n");
}
/* Debugging function to print out a chain of trees. */
DEBUG_FUNCTION void
debug_tree_chain (tree t)
{
struct pointer_set_t *seen = pointer_set_create ();
while (t)
{
print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
fprintf (stderr, " ");
t = TREE_CHAIN (t);
if (pointer_set_insert (seen, t))
{
fprintf (stderr, "... [cycled back to ");
print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
fprintf (stderr, "]");
break;
}
}
fprintf (stderr, "\n");
pointer_set_destroy (seen);
}
/* Prints declaration DECL to the FILE with details specified by FLAGS. */
void
print_generic_decl (FILE *file, tree decl, int flags)
{
maybe_init_pretty_print (file);
print_declaration (&buffer, decl, 2, flags);
pp_write_text_to_stream (&buffer);
}
/* Print tree T, and its successors, on file FILE. FLAGS specifies details
to show in the dump. See TDF_* in tree-pass.h. */
void
print_generic_stmt (FILE *file, tree t, int flags)
{
maybe_init_pretty_print (file);
dump_generic_node (&buffer, t, 0, flags, true);
pp_flush (&buffer);
}
/* Print tree T, and its successors, on file FILE. FLAGS specifies details
to show in the dump. See TDF_* in tree-pass.h. The output is indented by
INDENT spaces. */
void
print_generic_stmt_indented (FILE *file, tree t, int flags, int indent)
{
int i;
maybe_init_pretty_print (file);
for (i = 0; i < indent; i++)
pp_space (&buffer);
dump_generic_node (&buffer, t, indent, flags, true);
pp_flush (&buffer);
}
/* Print a single expression T on file FILE. FLAGS specifies details to show
in the dump. See TDF_* in tree-pass.h. */
void
print_generic_expr (FILE *file, tree t, int flags)
{
maybe_init_pretty_print (file);
dump_generic_node (&buffer, t, 0, flags, false);
}
/* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set
in FLAGS. */
static void
dump_decl_name (pretty_printer *buffer, tree node, int flags)
{
if (DECL_NAME (node))
{
if ((flags & TDF_ASMNAME) && DECL_ASSEMBLER_NAME_SET_P (node))
pp_tree_identifier (buffer, DECL_ASSEMBLER_NAME (node));
else
pp_tree_identifier (buffer, DECL_NAME (node));
}
if ((flags & TDF_UID) || DECL_NAME (node) == NULL_TREE)
{
if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1)
pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (node));
else if (TREE_CODE (node) == DEBUG_EXPR_DECL)
{
if (flags & TDF_NOUID)
pp_string (buffer, "D#xxxx");
else
pp_printf (buffer, "D#%i", DEBUG_TEMP_UID (node));
}
else
{
char c = TREE_CODE (node) == CONST_DECL ? 'C' : 'D';
if (flags & TDF_NOUID)
pp_printf (buffer, "%c.xxxx", c);
else
pp_printf (buffer, "%c.%u", c, DECL_UID (node));
}
}
if ((flags & TDF_ALIAS) && DECL_PT_UID (node) != DECL_UID (node))
{
if (flags & TDF_NOUID)
pp_printf (buffer, "ptD.xxxx");
else
pp_printf (buffer, "ptD.%u", DECL_PT_UID (node));
}
}
/* Like the above, but used for pretty printing function calls. */
static void
dump_function_name (pretty_printer *buffer, tree node, int flags)
{
if (TREE_CODE (node) == NOP_EXPR)
node = TREE_OPERAND (node, 0);
if (DECL_NAME (node) && (flags & TDF_ASMNAME) == 0)
pp_string (buffer, lang_hooks.decl_printable_name (node, 1));
else
dump_decl_name (buffer, node, flags);
}
/* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and
FLAGS are as in dump_generic_node. */
static void
dump_function_declaration (pretty_printer *buffer, tree node,
int spc, int flags)
{
bool wrote_arg = false;
tree arg;
pp_space (buffer);
pp_character (buffer, '(');
/* Print the argument types. */
arg = TYPE_ARG_TYPES (node);
while (arg && arg != void_list_node && arg != error_mark_node)
{
if (wrote_arg)
{
pp_character (buffer, ',');
pp_space (buffer);
}
wrote_arg = true;
dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false);
arg = TREE_CHAIN (arg);
}
/* Drop the trailing void_type_node if we had any previous argument. */
if (arg == void_list_node && !wrote_arg)
pp_string (buffer, "void");
/* Properly dump vararg function types. */
else if (!arg && wrote_arg)
pp_string (buffer, ", ...");
/* Avoid printing any arg for unprototyped functions. */
pp_character (buffer, ')');
}
/* Dump the domain associated with an array. */
static void
dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags)
{
pp_character (buffer, '[');
if (domain)
{
tree min = TYPE_MIN_VALUE (domain);
tree max = TYPE_MAX_VALUE (domain);
if (min && max
&& integer_zerop (min)
&& host_integerp (max, 0))
pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1);
else
{
if (min)
dump_generic_node (buffer, min, spc, flags, false);
pp_character (buffer, ':');
if (max)
dump_generic_node (buffer, max, spc, flags, false);
}
}
else
pp_string (buffer, "<unknown>");
pp_character (buffer, ']');
}
/* Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in
dump_generic_node. */
static void
dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
{
const char *name;
switch (OMP_CLAUSE_CODE (clause))
{
case OMP_CLAUSE_PRIVATE:
name = "private";
goto print_remap;
case OMP_CLAUSE_SHARED:
name = "shared";
goto print_remap;
case OMP_CLAUSE_FIRSTPRIVATE:
name = "firstprivate";
goto print_remap;
case OMP_CLAUSE_LASTPRIVATE:
name = "lastprivate";
goto print_remap;
case OMP_CLAUSE_COPYIN:
name = "copyin";
goto print_remap;
case OMP_CLAUSE_COPYPRIVATE:
name = "copyprivate";
goto print_remap;
print_remap:
pp_string (buffer, name);
pp_character (buffer, '(');
dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_REDUCTION:
pp_string (buffer, "reduction(");
pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause)));
pp_character (buffer, ':');
dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_IF:
pp_string (buffer, "if(");
dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_NUM_THREADS:
pp_string (buffer, "num_threads(");
dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_NOWAIT:
pp_string (buffer, "nowait");
break;
case OMP_CLAUSE_ORDERED:
pp_string (buffer, "ordered");
break;
case OMP_CLAUSE_DEFAULT:
pp_string (buffer, "default(");
switch (OMP_CLAUSE_DEFAULT_KIND (clause))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
pp_string (buffer, "shared");
break;
case OMP_CLAUSE_DEFAULT_NONE:
pp_string (buffer, "none");
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
pp_string (buffer, "private");
break;
case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
pp_string (buffer, "firstprivate");
break;
default:
gcc_unreachable ();
}
pp_character (buffer, ')');
break;
case OMP_CLAUSE_SCHEDULE:
pp_string (buffer, "schedule(");
switch (OMP_CLAUSE_SCHEDULE_KIND (clause))
{
case OMP_CLAUSE_SCHEDULE_STATIC:
pp_string (buffer, "static");
break;
case OMP_CLAUSE_SCHEDULE_DYNAMIC:
pp_string (buffer, "dynamic");
break;
case OMP_CLAUSE_SCHEDULE_GUIDED:
pp_string (buffer, "guided");
break;
case OMP_CLAUSE_SCHEDULE_RUNTIME:
pp_string (buffer, "runtime");
break;
case OMP_CLAUSE_SCHEDULE_AUTO:
pp_string (buffer, "auto");
break;
default:
gcc_unreachable ();
}
if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause))
{
pp_character (buffer, ',');
dump_generic_node (buffer,
OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause),
spc, flags, false);
}
pp_character (buffer, ')');
break;
case OMP_CLAUSE_UNTIED:
pp_string (buffer, "untied");
break;
case OMP_CLAUSE_COLLAPSE:
pp_string (buffer, "collapse(");
dump_generic_node (buffer,
OMP_CLAUSE_COLLAPSE_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_FINAL:
pp_string (buffer, "final(");
dump_generic_node (buffer, OMP_CLAUSE_FINAL_EXPR (clause),
spc, flags, false);
pp_character (buffer, ')');
break;
case OMP_CLAUSE_MERGEABLE:
pp_string (buffer, "mergeable");
break;
default:
/* Should never happen. */
dump_generic_node (buffer, clause, spc, flags, false);
break;
}
}
/* Dump the list of OpenMP clauses. BUFFER, SPC and FLAGS are as in
dump_generic_node. */
void
dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags)
{
if (clause == NULL)
return;
pp_space (buffer);
while (1)
{
dump_omp_clause (buffer, clause, spc, flags);
clause = OMP_CLAUSE_CHAIN (clause);
if (clause == NULL)
return;
pp_space (buffer);
}
}
/* Dump location LOC to BUFFER. */
static void
dump_location (pretty_printer *buffer, location_t loc)
{
expanded_location xloc = expand_location (loc);
pp_character (buffer, '[');
if (xloc.file)
{
pp_string (buffer, xloc.file);
pp_string (buffer, " : ");
}
pp_decimal_int (buffer, xloc.line);
pp_string (buffer, "] ");
}
/* Dump lexical block BLOCK. BUFFER, SPC and FLAGS are as in
dump_generic_node. */
static void
dump_block_node (pretty_printer *buffer, tree block, int spc, int flags)
{
tree t;
pp_printf (buffer, "BLOCK #%d ", BLOCK_NUMBER (block));
if (flags & TDF_ADDRESS)
pp_printf (buffer, "[%p] ", (void *) block);
if (BLOCK_ABSTRACT (block))
pp_string (buffer, "[abstract] ");
if (TREE_ASM_WRITTEN (block))
pp_string (buffer, "[written] ");
if (flags & TDF_SLIM)
return;
if (BLOCK_SOURCE_LOCATION (block))
dump_location (buffer, BLOCK_SOURCE_LOCATION (block));
newline_and_indent (buffer, spc + 2);
if (BLOCK_SUPERCONTEXT (block))
{
pp_string (buffer, "SUPERCONTEXT: ");
dump_generic_node (buffer, BLOCK_SUPERCONTEXT (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_SUBBLOCKS (block))
{
pp_string (buffer, "SUBBLOCKS: ");
for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_CHAIN (block))
{
pp_string (buffer, "SIBLINGS: ");
for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_VARS (block))
{
pp_string (buffer, "VARS: ");
for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (VEC_length (tree, BLOCK_NONLOCALIZED_VARS (block)) > 0)
{
unsigned i;
VEC(tree,gc) *nlv = BLOCK_NONLOCALIZED_VARS (block);
pp_string (buffer, "NONLOCALIZED_VARS: ");
FOR_EACH_VEC_ELT (tree, nlv, i, t)
{
dump_generic_node (buffer, t, 0, flags, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_ABSTRACT_ORIGIN (block))
{
pp_string (buffer, "ABSTRACT_ORIGIN: ");
dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_FRAGMENT_ORIGIN (block))
{
pp_string (buffer, "FRAGMENT_ORIGIN: ");
dump_generic_node (buffer, BLOCK_FRAGMENT_ORIGIN (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (buffer, spc + 2);
}
if (BLOCK_FRAGMENT_CHAIN (block))
{
pp_string (buffer, "FRAGMENT_CHAIN: ");
for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t))
{
dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false);
pp_string (buffer, " ");
}
newline_and_indent (buffer, spc + 2);
}
}
/* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of
indent. FLAGS specifies details to show in the dump (see TDF_* in
tree-pass.h). If IS_STMT is true, the object printed is considered
to be a statement and it is terminated by ';' if appropriate. */
int
dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
bool is_stmt)
{
tree type;
tree op0, op1;
const char *str;
bool is_expr;
if (node == NULL_TREE)
return spc;
is_expr = EXPR_P (node);
if (is_stmt && (flags & TDF_STMTADDR))
pp_printf (buffer, "<&%p> ", (void *)node);
if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node))
dump_location (buffer, EXPR_LOCATION (node));
switch (TREE_CODE (node))
{
case ERROR_MARK:
pp_string (buffer, "<<< error >>>");
break;
case IDENTIFIER_NODE:
pp_tree_identifier (buffer, node);
break;
case TREE_LIST:
while (node && node != error_mark_node)
{
if (TREE_PURPOSE (node))
{
dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false);
pp_space (buffer);
}
dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false);
node = TREE_CHAIN (node);
if (node && TREE_CODE (node) == TREE_LIST)
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
break;
case TREE_BINFO:
dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false);
break;
case TREE_VEC:
{
size_t i;
if (TREE_VEC_LENGTH (node) > 0)
{
size_t len = TREE_VEC_LENGTH (node);
for (i = 0; i < len - 1; i++)
{
dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags,
false);
pp_character (buffer, ',');
pp_space (buffer);
}
dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc,
flags, false);
}
}
break;
case VOID_TYPE:
case INTEGER_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
enum tree_code_class tclass;
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, "const ");
else if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, "volatile ");
else if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, "restrict ");
if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node)))
{
pp_string (buffer, "<address-space-");
pp_decimal_int (buffer, TYPE_ADDR_SPACE (node));
pp_string (buffer, "> ");
}
tclass = TREE_CODE_CLASS (TREE_CODE (node));
if (tclass == tcc_declaration)
{
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else
pp_string (buffer, "<unnamed type decl>");
}
else if (tclass == tcc_type)
{
if (TYPE_NAME (node))
{
if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
pp_tree_identifier (buffer, TYPE_NAME (node));
else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
&& DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else
pp_string (buffer, "<unnamed type>");
}
else if (TREE_CODE (node) == VECTOR_TYPE)
{
pp_string (buffer, "vector");
pp_character (buffer, '(');
pp_wide_integer (buffer, TYPE_VECTOR_SUBPARTS (node));
pp_string (buffer, ") ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else if (TREE_CODE (node) == INTEGER_TYPE)
{
pp_string (buffer, (TYPE_UNSIGNED (node)
? "<unnamed-unsigned:"
: "<unnamed-signed:"));
pp_decimal_int (buffer, TYPE_PRECISION (node));
pp_string (buffer, ">");
}
else if (TREE_CODE (node) == COMPLEX_TYPE)
{
pp_string (buffer, "__complex__ ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else if (TREE_CODE (node) == REAL_TYPE)
{
pp_string (buffer, "<float:");
pp_decimal_int (buffer, TYPE_PRECISION (node));
pp_string (buffer, ">");
}
else if (TREE_CODE (node) == FIXED_POINT_TYPE)
{
pp_string (buffer, "<fixed-point-");
pp_string (buffer, TYPE_SATURATING (node) ? "sat:" : "nonsat:");
pp_decimal_int (buffer, TYPE_PRECISION (node));
pp_string (buffer, ">");
}
else if (TREE_CODE (node) == VOID_TYPE)
pp_string (buffer, "void");
else
pp_string (buffer, "<unnamed type>");
}
break;
}
case POINTER_TYPE:
case REFERENCE_TYPE:
str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&");
if (TREE_TYPE (node) == NULL)
{
pp_string (buffer, str);
pp_string (buffer, "<null type>");
}
else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE)
{
tree fnode = TREE_TYPE (node);
dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '(');
pp_string (buffer, str);
if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else if (flags & TDF_NOUID)
pp_printf (buffer, "<Txxxx>");
else
pp_printf (buffer, "<T%x>", TYPE_UID (node));
pp_character (buffer, ')');
dump_function_declaration (buffer, fnode, spc, flags);
}
else
{
unsigned int quals = TYPE_QUALS (node);
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_space (buffer);
pp_string (buffer, str);
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, " const");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, " volatile");
if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, " restrict");
if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node)))
{
pp_string (buffer, " <address-space-");
pp_decimal_int (buffer, TYPE_ADDR_SPACE (node));
pp_string (buffer, ">");
}
if (TYPE_REF_CAN_ALIAS_ALL (node))
pp_string (buffer, " {ref-all}");
}
break;
case OFFSET_TYPE:
NIY;
break;
case MEM_REF:
{
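/* Note: if the constant offset is zero and the access type matches the
pointed-to type, print the reference in the short "*p" style (or as the
underlying object when the base is an ADDR_EXPR); otherwise fall back to
the explicit MEM[(type)base + offset] form below.  */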
if (integer_zerop (TREE_OPERAND (node, 1))
/* Dump the types of INTEGER_CSTs explicitly, for we can't
infer them and MEM_ATTR caching will share MEM_REFs
with differently-typed op0s. */
&& TREE_CODE (TREE_OPERAND (node, 0)) != INTEGER_CST
/* Released SSA_NAMES have no TREE_TYPE. */
&& TREE_TYPE (TREE_OPERAND (node, 0)) != NULL_TREE
/* Same pointer types, but ignoring POINTER_TYPE vs.
REFERENCE_TYPE. */
&& (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 0)))
== TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1))))
&& (TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 0)))
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 1))))
&& (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 0)))
== TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 1))))
/* Same value types ignoring qualifiers. */
&& (TYPE_MAIN_VARIANT (TREE_TYPE (node))
== TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1))))))
{
if (TREE_CODE (TREE_OPERAND (node, 0)) != ADDR_EXPR)
{
pp_string (buffer, "*");
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, false);
}
else
dump_generic_node (buffer,
TREE_OPERAND (TREE_OPERAND (node, 0), 0),
spc, flags, false);
}
else
{
tree ptype;
pp_string (buffer, "MEM[");
pp_string (buffer, "(");
ptype = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (node, 1)));
dump_generic_node (buffer, ptype,
spc, flags | TDF_SLIM, false);
pp_string (buffer, ")");
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, false);
if (!integer_zerop (TREE_OPERAND (node, 1)))
{
pp_string (buffer, " + ");
dump_generic_node (buffer, TREE_OPERAND (node, 1),
spc, flags, false);
}
pp_string (buffer, "]");
}
break;
}
case TARGET_MEM_REF:
{
const char *sep = "";
tree tmp;
pp_string (buffer, "MEM[");
if (TREE_CODE (TMR_BASE (node)) == ADDR_EXPR)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "symbol: ");
dump_generic_node (buffer, TREE_OPERAND (TMR_BASE (node), 0),
spc, flags, false);
}
else
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "base: ");
dump_generic_node (buffer, TMR_BASE (node), spc, flags, false);
}
tmp = TMR_INDEX2 (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "base: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_INDEX (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "index: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_STEP (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "step: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
tmp = TMR_OFFSET (node);
if (tmp)
{
pp_string (buffer, sep);
sep = ", ";
pp_string (buffer, "offset: ");
dump_generic_node (buffer, tmp, spc, flags, false);
}
pp_string (buffer, "]");
}
break;
case ARRAY_TYPE:
{
tree tmp;
/* Print the innermost component type. */
for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE;
tmp = TREE_TYPE (tmp))
;
dump_generic_node (buffer, tmp, spc, flags, false);
/* Print the dimensions. */
for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp))
dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags);
break;
}
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, "const ");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (buffer, "volatile ");
/* Print the name of the structure. */
if (TREE_CODE (node) == RECORD_TYPE)
pp_string (buffer, "struct ");
else if (TREE_CODE (node) == UNION_TYPE)
pp_string (buffer, "union ");
if (TYPE_NAME (node))
dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false);
else if (!(flags & TDF_SLIM))
/* FIXME: If we eliminate the 'else' above and attempt
to show the fields for named types, we may get stuck
following a cycle of pointers to structs. The alleged
self-reference check in print_struct_decl will not detect
cycles involving more than one pointer or struct type. */
print_struct_decl (buffer, node, spc, flags);
break;
}
case LANG_TYPE:
NIY;
break;
case INTEGER_CST:
if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE)
{
/* In the case of a pointer, one may want to divide by the
size of the pointed-to type. Unfortunately, this is not
straightforward. The C front-end maps expressions
(int *) 5
int *p; (p + 5)
in such a way that the two INTEGER_CST nodes for "5" have
different values but identical types. In the latter
case, the 5 is multiplied by sizeof (int) in c-common.c
(pointer_int_sum) to convert it to a byte address, and
yet the type of the node is left unchanged. Argh. What
is consistent though is that the number value corresponds
to a byte (UNITS) offset.
NB: Neither of the following divisors can be trivially
used to recover the original literal:
TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node)))
TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
pp_string (buffer, "B"); /* pseudo-unit */
}
else if (host_integerp (node, 0))
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
else if (host_integerp (node, 1))
pp_unsigned_wide_integer (buffer, TREE_INT_CST_LOW (node));
else
{
tree val = node;
unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val);
HOST_WIDE_INT high = TREE_INT_CST_HIGH (val);
if (tree_int_cst_sgn (val) < 0)
{
pp_character (buffer, '-');
high = ~high + !low;
low = -low;
}
/* Would "%x%0*x" or "%x%*0x" get zero-padding on all
systems? */
sprintf (pp_buffer (buffer)->digit_buffer,
HOST_WIDE_INT_PRINT_DOUBLE_HEX,
(unsigned HOST_WIDE_INT) high, low);
pp_string (buffer, pp_buffer (buffer)->digit_buffer);
}
break;
case REAL_CST:
/* Code copied from print_node. */
{
REAL_VALUE_TYPE d;
if (TREE_OVERFLOW (node))
pp_string (buffer, " overflow");
#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
d = TREE_REAL_CST (node);
if (REAL_VALUE_ISINF (d))
pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? " -Inf" : " Inf");
else if (REAL_VALUE_ISNAN (d))
pp_string (buffer, " Nan");
else
{
char string[100];
real_to_decimal (string, &d, sizeof (string), 0, 1);
pp_string (buffer, string);
}
#else
{
HOST_WIDE_INT i;
unsigned char *p = (unsigned char *) &TREE_REAL_CST (node);
pp_string (buffer, "0x");
for (i = 0; i < sizeof TREE_REAL_CST (node); i++)
output_formatted_integer (buffer, "%02x", *p++);
}
#endif
break;
}
case FIXED_CST:
{
char string[100];
fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string));
pp_string (buffer, string);
break;
}
case COMPLEX_CST:
pp_string (buffer, "__complex__ (");
dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false);
pp_string (buffer, ")");
break;
case STRING_CST:
pp_string (buffer, "\"");
pretty_print_string (buffer, TREE_STRING_POINTER (node));
pp_string (buffer, "\"");
break;
case VECTOR_CST:
{
tree elt;
pp_string (buffer, "{ ");
for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt))
{
dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false);
if (TREE_CHAIN (elt))
pp_string (buffer, ", ");
}
pp_string (buffer, " }");
}
break;
case FUNCTION_TYPE:
case METHOD_TYPE:
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_space (buffer);
if (TREE_CODE (node) == METHOD_TYPE)
{
if (TYPE_METHOD_BASETYPE (node))
dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)),
flags);
else
pp_string (buffer, "<null method basetype>");
pp_string (buffer, "::");
}
if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node)))
dump_decl_name (buffer, TYPE_NAME (node), flags);
else if (flags & TDF_NOUID)
pp_printf (buffer, "<Txxxx>");
else
pp_printf (buffer, "<T%x>", TYPE_UID (node));
dump_function_declaration (buffer, node, spc, flags);
break;
case FUNCTION_DECL:
case CONST_DECL:
dump_decl_name (buffer, node, flags);
break;
case LABEL_DECL:
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else if (LABEL_DECL_UID (node) != -1)
pp_printf (buffer, "<L%d>", (int) LABEL_DECL_UID (node));
else
{
if (flags & TDF_NOUID)
pp_string (buffer, "<D.xxxx>");
else
pp_printf (buffer, "<D.%u>", DECL_UID (node));
}
break;
case TYPE_DECL:
if (DECL_IS_BUILTIN (node))
{
/* Don't print the declaration of built-in types. */
break;
}
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else if (TYPE_NAME (TREE_TYPE (node)) != node)
{
if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
&& TYPE_METHODS (TREE_TYPE (node)))
{
/* The type is a C++ class: all structures have at least
4 methods. */
pp_string (buffer, "class ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
else
{
pp_string (buffer,
(TREE_CODE (TREE_TYPE (node)) == UNION_TYPE
? "union" : "struct "));
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
}
}
else
pp_string (buffer, "<anon>");
break;
case VAR_DECL:
case PARM_DECL:
case FIELD_DECL:
case DEBUG_EXPR_DECL:
case NAMESPACE_DECL:
dump_decl_name (buffer, node, flags);
break;
case RESULT_DECL:
pp_string (buffer, "<retval>");
break;
case COMPONENT_REF:
op0 = TREE_OPERAND (node, 0);
str = ".";
if (op0
&& (TREE_CODE (op0) == INDIRECT_REF
|| (TREE_CODE (op0) == MEM_REF
&& TREE_CODE (TREE_OPERAND (op0, 0)) != ADDR_EXPR
&& integer_zerop (TREE_OPERAND (op0, 1))
/* Dump the types of INTEGER_CSTs explicitly, for we
can't infer them and MEM_ATTR caching will share
MEM_REFs with differently-typed op0s. */
&& TREE_CODE (TREE_OPERAND (op0, 0)) != INTEGER_CST
/* Released SSA_NAMES have no TREE_TYPE. */
&& TREE_TYPE (TREE_OPERAND (op0, 0)) != NULL_TREE
/* Same pointer types, but ignoring POINTER_TYPE vs.
REFERENCE_TYPE. */
&& (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1))))
&& (TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 1))))
&& (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 1))))
/* Same value types ignoring qualifiers. */
&& (TYPE_MAIN_VARIANT (TREE_TYPE (op0))
== TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1))))))))
{
op0 = TREE_OPERAND (op0, 0);
str = "->";
}
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
pp_string (buffer, str);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
op0 = component_ref_field_offset (node);
if (op0 && TREE_CODE (op0) != INTEGER_CST)
{
pp_string (buffer, "{off: ");
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, '}');
}
break;
case BIT_FIELD_REF:
pp_string (buffer, "BIT_FIELD_REF <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
op0 = TREE_OPERAND (node, 0);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
pp_character (buffer, '[');
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
if (TREE_CODE (node) == ARRAY_RANGE_REF)
pp_string (buffer, " ...");
pp_character (buffer, ']');
op0 = array_ref_low_bound (node);
op1 = array_ref_element_size (node);
if (!integer_zerop (op0)
|| TREE_OPERAND (node, 2)
|| TREE_OPERAND (node, 3))
{
pp_string (buffer, "{lb: ");
dump_generic_node (buffer, op0, spc, flags, false);
pp_string (buffer, " sz: ");
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, '}');
}
break;
case CONSTRUCTOR:
{
unsigned HOST_WIDE_INT ix;
tree field, val;
bool is_struct_init = false;
bool is_array_init = false;
double_int curidx = double_int_zero;
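/* For array constructors, CURIDX tracks the next implicit index, so an
explicit "[index]=" designator is only printed when an element is out of
sequence (for example after a RANGE_EXPR or a gap).  */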
pp_character (buffer, '{');
if (TREE_CLOBBER_P (node))
pp_string (buffer, "CLOBBER");
else if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
is_struct_init = true;
else if (TREE_CODE (TREE_TYPE (node)) == ARRAY_TYPE
&& TYPE_DOMAIN (TREE_TYPE (node))
&& TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)))
&& TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))))
== INTEGER_CST)
{
tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)));
is_array_init = true;
curidx = tree_to_double_int (minv);
}
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
{
if (field)
{
if (is_struct_init)
{
pp_character (buffer, '.');
dump_generic_node (buffer, field, spc, flags, false);
pp_character (buffer, '=');
}
else if (is_array_init
&& (TREE_CODE (field) != INTEGER_CST
|| !double_int_equal_p (tree_to_double_int (field),
curidx)))
{
pp_character (buffer, '[');
if (TREE_CODE (field) == RANGE_EXPR)
{
dump_generic_node (buffer, TREE_OPERAND (field, 0), spc,
flags, false);
pp_string (buffer, " ... ");
dump_generic_node (buffer, TREE_OPERAND (field, 1), spc,
flags, false);
if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST)
curidx = tree_to_double_int (TREE_OPERAND (field, 1));
}
else
dump_generic_node (buffer, field, spc, flags, false);
if (TREE_CODE (field) == INTEGER_CST)
curidx = tree_to_double_int (field);
pp_string (buffer, "]=");
}
}
if (is_array_init)
curidx = double_int_add (curidx, double_int_one);
if (val && TREE_CODE (val) == ADDR_EXPR)
if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
val = TREE_OPERAND (val, 0);
if (val && TREE_CODE (val) == FUNCTION_DECL)
dump_decl_name (buffer, val, flags);
else
dump_generic_node (buffer, val, spc, flags, false);
if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1)
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
pp_character (buffer, '}');
}
break;
case COMPOUND_EXPR:
{
tree *tp;
if (flags & TDF_SLIM)
{
pp_string (buffer, "<COMPOUND_EXPR>");
break;
}
dump_generic_node (buffer, TREE_OPERAND (node, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (buffer, spc);
else
{
pp_character (buffer, ',');
pp_space (buffer);
}
for (tp = &TREE_OPERAND (node, 1);
TREE_CODE (*tp) == COMPOUND_EXPR;
tp = &TREE_OPERAND (*tp, 1))
{
dump_generic_node (buffer, TREE_OPERAND (*tp, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (buffer, spc);
else
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM));
}
break;
case STATEMENT_LIST:
{
tree_stmt_iterator si;
bool first = true;
if (flags & TDF_SLIM)
{
pp_string (buffer, "<STATEMENT_LIST>");
break;
}
for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si))
{
if (!first)
newline_and_indent (buffer, spc);
else
first = false;
dump_generic_node (buffer, tsi_stmt (si), spc, flags, true);
}
}
break;
case MODIFY_EXPR:
case INIT_EXPR:
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags,
false);
pp_space (buffer);
pp_character (buffer, '=');
if (TREE_CODE (node) == MODIFY_EXPR
&& MOVE_NONTEMPORAL (node))
pp_string (buffer, "{nt}");
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags,
false);
break;
case TARGET_EXPR:
pp_string (buffer, "TARGET_EXPR <");
dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false);
pp_character (buffer, ',');
pp_space (buffer);
dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false);
pp_character (buffer, '>');
break;
case DECL_EXPR:
print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags);
is_stmt = false;
break;
case COND_EXPR:
if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node)
{
pp_string (buffer, "if (");
dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false);
pp_character (buffer, ')');
/* The lowered cond_exprs should always be printed in full. */
if (COND_EXPR_THEN (node)
&& (IS_EMPTY_STMT (COND_EXPR_THEN (node))
|| TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR)
&& COND_EXPR_ELSE (node)
&& (IS_EMPTY_STMT (COND_EXPR_ELSE (node))
|| TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR))
{
pp_space (buffer);
dump_generic_node (buffer, COND_EXPR_THEN (node),
0, flags, true);
if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node)))
{
pp_string (buffer, " else ");
dump_generic_node (buffer, COND_EXPR_ELSE (node),
0, flags, true);
}
}
else if (!(flags & TDF_SLIM))
{
/* Output COND_EXPR_THEN. */
if (COND_EXPR_THEN (node))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4,
flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
/* Output COND_EXPR_ELSE. */
if (COND_EXPR_ELSE (node)
&& !IS_EMPTY_STMT (COND_EXPR_ELSE (node)))
{
newline_and_indent (buffer, spc);
pp_string (buffer, "else");
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4,
flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
}
is_expr = false;
}
else
{
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '?');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_space (buffer);
pp_character (buffer, ':');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
}
break;
case BIND_EXPR:
pp_character (buffer, '{');
if (!(flags & TDF_SLIM))
{
if (BIND_EXPR_VARS (node))
{
pp_newline (buffer);
for (op0 = BIND_EXPR_VARS (node); op0; op0 = DECL_CHAIN (op0))
{
print_declaration (buffer, op0, spc+2, flags);
pp_newline (buffer);
}
}
newline_and_indent (buffer, spc+2);
dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true);
newline_and_indent (buffer, spc);
pp_character (buffer, '}');
}
is_expr = false;
break;
case CALL_EXPR:
print_call_name (buffer, CALL_EXPR_FN (node), flags);
/* Print parameters. */
pp_space (buffer);
pp_character (buffer, '(');
{
tree arg;
call_expr_arg_iterator iter;
FOR_EACH_CALL_EXPR_ARG (arg, iter, node)
{
dump_generic_node (buffer, arg, spc, flags, false);
if (more_call_expr_args_p (&iter))
{
pp_character (buffer, ',');
pp_space (buffer);
}
}
}
if (CALL_EXPR_VA_ARG_PACK (node))
{
if (call_expr_nargs (node) > 0)
{
pp_character (buffer, ',');
pp_space (buffer);
}
pp_string (buffer, "__builtin_va_arg_pack ()");
}
pp_character (buffer, ')');
op1 = CALL_EXPR_STATIC_CHAIN (node);
if (op1)
{
pp_string (buffer, " [static-chain: ");
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ']');
}
if (CALL_EXPR_RETURN_SLOT_OPT (node))
pp_string (buffer, " [return slot optimization]");
if (CALL_EXPR_TAILCALL (node))
pp_string (buffer, " [tail call]");
break;
case WITH_CLEANUP_EXPR:
NIY;
break;
case CLEANUP_POINT_EXPR:
pp_string (buffer, "<<cleanup_point ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">>");
break;
case PLACEHOLDER_EXPR:
pp_string (buffer, "<PLACEHOLDER_EXPR ");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_character (buffer, '>');
break;
/* Binary arithmetic and logic expressions. */
case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
case MULT_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
case MINUS_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
case VEC_LSHIFT_EXPR:
case VEC_RSHIFT_EXPR:
case WIDEN_LSHIFT_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
case UNLT_EXPR:
case UNLE_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
case LTGT_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
{
const char *op = op_symbol (node);
op0 = TREE_OPERAND (node, 0);
op1 = TREE_OPERAND (node, 1);
/* When the operands are expressions with less priority,
keep semantics of the tree representation. */
if (op_prio (op0) <= op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, op0, spc, flags, false);
pp_space (buffer);
pp_string (buffer, op);
pp_space (buffer);
/* When the operands are expressions with less priority,
keep semantics of the tree representation. */
if (op_prio (op1) <= op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, op1, spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, op1, spc, flags, false);
}
break;
/* Unary arithmetic and logic expressions. */
case NEGATE_EXPR:
case BIT_NOT_EXPR:
case TRUTH_NOT_EXPR:
case ADDR_EXPR:
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case INDIRECT_REF:
if (TREE_CODE (node) == ADDR_EXPR
&& (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST
|| TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL))
; /* Do not output '&' for strings and function pointers. */
else
pp_string (buffer, op_symbol (node));
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
break;
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_character (buffer, '(');
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
}
else
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, op_symbol (node));
break;
case MIN_EXPR:
pp_string (buffer, "MIN_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '>');
break;
case MAX_EXPR:
pp_string (buffer, "MAX_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_character (buffer, '>');
break;
case ABS_EXPR:
pp_string (buffer, "ABS_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case RANGE_EXPR:
NIY;
break;
case ADDR_SPACE_CONVERT_EXPR:
case FIXED_CONVERT_EXPR:
case FIX_TRUNC_EXPR:
case FLOAT_EXPR:
CASE_CONVERT:
type = TREE_TYPE (node);
op0 = TREE_OPERAND (node, 0);
if (type != TREE_TYPE (op0))
{
pp_character (buffer, '(');
dump_generic_node (buffer, type, spc, flags, false);
pp_string (buffer, ") ");
}
if (op_prio (op0) < op_prio (node))
pp_character (buffer, '(');
dump_generic_node (buffer, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_character (buffer, ')');
break;
case VIEW_CONVERT_EXPR:
pp_string (buffer, "VIEW_CONVERT_EXPR<");
dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false);
pp_string (buffer, ">(");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, ')');
break;
case PAREN_EXPR:
pp_string (buffer, "((");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, "))");
break;
case NON_LVALUE_EXPR:
pp_string (buffer, "NON_LVALUE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case SAVE_EXPR:
pp_string (buffer, "SAVE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_character (buffer, '>');
break;
case COMPLEX_EXPR:
pp_string (buffer, "COMPLEX_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ">");
break;
case CONJ_EXPR:
pp_string (buffer, "CONJ_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case REALPART_EXPR:
pp_string (buffer, "REALPART_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case IMAGPART_EXPR:
pp_string (buffer, "IMAGPART_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case VA_ARG_EXPR:
pp_string (buffer, "VA_ARG_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ">");
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
pp_string (buffer, "try");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
newline_and_indent (buffer, spc);
pp_string (buffer,
(TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case CATCH_EXPR:
pp_string (buffer, "catch (");
dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false);
pp_string (buffer, ")");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case EH_FILTER_EXPR:
pp_string (buffer, "<<<eh_filter (");
dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false);
pp_string (buffer, ")>>>");
newline_and_indent (buffer, spc+2);
pp_string (buffer, "{");
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_string (buffer, "}");
is_expr = false;
break;
case LABEL_EXPR:
op0 = TREE_OPERAND (node, 0);
/* If this is for break or continue, don't bother printing it. */
if (DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
break;
}
dump_generic_node (buffer, op0, spc, flags, false);
pp_character (buffer, ':');
if (DECL_NONLOCAL (op0))
pp_string (buffer, " [non-local]");
break;
case LOOP_EXPR:
pp_string (buffer, "while (1)");
if (!(flags & TDF_SLIM))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true);
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case PREDICT_EXPR:
pp_string (buffer, "// predicted ");
if (PREDICT_EXPR_OUTCOME (node))
pp_string (buffer, "likely by ");
else
pp_string (buffer, "unlikely by ");
pp_string (buffer, predictor_name (PREDICT_EXPR_PREDICTOR (node)));
pp_string (buffer, " predictor.");
break;
case RETURN_EXPR:
pp_string (buffer, "return");
op0 = TREE_OPERAND (node, 0);
if (op0)
{
pp_space (buffer);
if (TREE_CODE (op0) == MODIFY_EXPR)
dump_generic_node (buffer, TREE_OPERAND (op0, 1),
spc, flags, false);
else
dump_generic_node (buffer, op0, spc, flags, false);
}
break;
case EXIT_EXPR:
pp_string (buffer, "if (");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ") break");
break;
case SWITCH_EXPR:
pp_string (buffer, "switch (");
dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false);
pp_character (buffer, ')');
if (!(flags & TDF_SLIM))
{
newline_and_indent (buffer, spc+2);
pp_character (buffer, '{');
if (SWITCH_BODY (node))
{
newline_and_indent (buffer, spc+4);
dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags,
true);
}
else
{
tree vec = SWITCH_LABELS (node);
size_t i, n = TREE_VEC_LENGTH (vec);
for (i = 0; i < n; ++i)
{
tree elt = TREE_VEC_ELT (vec, i);
newline_and_indent (buffer, spc+4);
if (elt)
{
dump_generic_node (buffer, elt, spc+4, flags, false);
pp_string (buffer, " goto ");
dump_generic_node (buffer, CASE_LABEL (elt), spc+4,
flags, true);
pp_semicolon (buffer);
}
else
pp_string (buffer, "case ???: goto ???;");
}
}
newline_and_indent (buffer, spc+2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case GOTO_EXPR:
op0 = GOTO_DESTINATION (node);
if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
{
pp_string (buffer, name);
break;
}
}
pp_string (buffer, "goto ");
dump_generic_node (buffer, op0, spc, flags, false);
break;
case ASM_EXPR:
pp_string (buffer, "__asm__");
if (ASM_VOLATILE_P (node))
pp_string (buffer, " __volatile__");
pp_character (buffer, '(');
dump_generic_node (buffer, ASM_STRING (node), spc, flags, false);
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false);
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false);
if (ASM_CLOBBERS (node))
{
pp_character (buffer, ':');
dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false);
}
pp_string (buffer, ")");
break;
case CASE_LABEL_EXPR:
if (CASE_LOW (node) && CASE_HIGH (node))
{
pp_string (buffer, "case ");
dump_generic_node (buffer, CASE_LOW (node), spc, flags, false);
pp_string (buffer, " ... ");
dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false);
}
else if (CASE_LOW (node))
{
pp_string (buffer, "case ");
dump_generic_node (buffer, CASE_LOW (node), spc, flags, false);
}
else
pp_string (buffer, "default");
pp_character (buffer, ':');
break;
case OBJ_TYPE_REF:
pp_string (buffer, "OBJ_TYPE_REF(");
dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false);
pp_character (buffer, ';');
dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false);
pp_character (buffer, '-');
pp_character (buffer, '>');
dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false);
pp_character (buffer, ')');
break;
case SSA_NAME:
dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false);
pp_string (buffer, "_");
pp_decimal_int (buffer, SSA_NAME_VERSION (node));
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node))
pp_string (buffer, "(ab)");
else if (SSA_NAME_IS_DEFAULT_DEF (node))
pp_string (buffer, "(D)");
break;
case WITH_SIZE_EXPR:
pp_string (buffer, "WITH_SIZE_EXPR <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ">");
break;
case ASSERT_EXPR:
pp_string (buffer, "ASSERT_EXPR <");
dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false);
pp_string (buffer, ">");
break;
case SCEV_KNOWN:
pp_string (buffer, "scev_known");
break;
case SCEV_NOT_KNOWN:
pp_string (buffer, "scev_not_known");
break;
case POLYNOMIAL_CHREC:
pp_string (buffer, "{");
dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false);
pp_string (buffer, ", +, ");
dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false);
pp_string (buffer, "}_");
dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false);
is_stmt = false;
break;
case REALIGN_LOAD_EXPR:
pp_string (buffer, "REALIGN_LOAD <");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, ">");
break;
case VEC_COND_EXPR:
pp_string (buffer, " VEC_COND_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PERM_EXPR:
pp_string (buffer, " VEC_PERM_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " , ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case DOT_PROD_EXPR:
pp_string (buffer, " DOT_PROD_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case WIDEN_MULT_PLUS_EXPR:
pp_string (buffer, " WIDEN_MULT_PLUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case WIDEN_MULT_MINUS_EXPR:
pp_string (buffer, " WIDEN_MULT_MINUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case FMA_EXPR:
pp_string (buffer, " FMA_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (buffer, " > ");
break;
case OMP_PARALLEL:
pp_string (buffer, "#pragma omp parallel");
dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags);
dump_omp_body:
if (!(flags & TDF_SLIM) && OMP_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 4);
dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false);
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
is_expr = false;
break;
case OMP_TASK:
pp_string (buffer, "#pragma omp task");
dump_omp_clauses (buffer, OMP_TASK_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_FOR:
pp_string (buffer, "#pragma omp for");
dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags);
if (!(flags & TDF_SLIM))
{
int i;
if (OMP_FOR_PRE_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
spc += 4;
newline_and_indent (buffer, spc);
dump_generic_node (buffer, OMP_FOR_PRE_BODY (node),
spc, flags, false);
}
spc -= 2;
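/* Each collapsed loop level gets its own "for (...)" line, indented two
extra columns per level; SPC is adjusted here and restored after the
body has been printed.  */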
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++)
{
spc += 2;
newline_and_indent (buffer, spc);
pp_string (buffer, "for (");
dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INIT (node), i),
spc, flags, false);
pp_string (buffer, "; ");
dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_COND (node), i),
spc, flags, false);
pp_string (buffer, "; ");
dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INCR (node), i),
spc, flags, false);
pp_string (buffer, ")");
}
if (OMP_FOR_BODY (node))
{
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 4);
dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags,
false);
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2;
if (OMP_FOR_PRE_BODY (node))
{
spc -= 4;
newline_and_indent (buffer, spc + 2);
pp_character (buffer, '}');
}
}
is_expr = false;
break;
case OMP_SECTIONS:
pp_string (buffer, "#pragma omp sections");
dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_SECTION:
pp_string (buffer, "#pragma omp section");
goto dump_omp_body;
case OMP_MASTER:
pp_string (buffer, "#pragma omp master");
goto dump_omp_body;
case OMP_ORDERED:
pp_string (buffer, "#pragma omp ordered");
goto dump_omp_body;
case OMP_CRITICAL:
pp_string (buffer, "#pragma omp critical");
if (OMP_CRITICAL_NAME (node))
{
pp_space (buffer);
pp_character (buffer, '(');
dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc,
flags, false);
pp_character (buffer, ')');
}
goto dump_omp_body;
case OMP_ATOMIC:
pp_string (buffer, "#pragma omp atomic");
newline_and_indent (buffer, spc + 2);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
break;
case OMP_ATOMIC_READ:
pp_string (buffer, "#pragma omp atomic read");
newline_and_indent (buffer, spc + 2);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
break;
case OMP_ATOMIC_CAPTURE_OLD:
case OMP_ATOMIC_CAPTURE_NEW:
pp_string (buffer, "#pragma omp atomic capture");
newline_and_indent (buffer, spc + 2);
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
break;
case OMP_SINGLE:
pp_string (buffer, "#pragma omp single");
dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_CLAUSE:
dump_omp_clause (buffer, node, spc, flags);
is_expr = false;
break;
case TRANSACTION_EXPR:
if (TRANSACTION_EXPR_OUTER (node))
pp_string (buffer, "__transaction_atomic [[outer]]");
else if (TRANSACTION_EXPR_RELAXED (node))
pp_string (buffer, "__transaction_relaxed");
else
pp_string (buffer, "__transaction_atomic");
if (!(flags & TDF_SLIM) && TRANSACTION_EXPR_BODY (node))
{
newline_and_indent (buffer, spc);
pp_character (buffer, '{');
newline_and_indent (buffer, spc + 2);
dump_generic_node (buffer, TRANSACTION_EXPR_BODY (node),
spc + 2, flags, false);
newline_and_indent (buffer, spc);
pp_character (buffer, '}');
}
is_expr = false;
break;
case REDUC_MAX_EXPR:
pp_string (buffer, " REDUC_MAX_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case REDUC_MIN_EXPR:
pp_string (buffer, " REDUC_MIN_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case REDUC_PLUS_EXPR:
pp_string (buffer, " REDUC_PLUS_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_WIDEN_MULT_HI_EXPR:
pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_WIDEN_MULT_LO_EXPR:
pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_WIDEN_LSHIFT_HI_EXPR:
pp_string (buffer, " VEC_WIDEN_LSHIFT_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_WIDEN_LSHIFT_LO_EXPR:
pp_string (buffer, " VEC_WIDEN_LSHIFT_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_HI_EXPR:
pp_string (buffer, " VEC_UNPACK_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_LO_EXPR:
pp_string (buffer, " VEC_UNPACK_LO_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_FLOAT_HI_EXPR:
pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_UNPACK_FLOAT_LO_EXPR:
pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PACK_TRUNC_EXPR:
pp_string (buffer, " VEC_PACK_TRUNC_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PACK_SAT_EXPR:
pp_string (buffer, " VEC_PACK_SAT_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case VEC_PACK_FIX_TRUNC_EXPR:
pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (buffer, ", ");
dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (buffer, " > ");
break;
case BLOCK:
dump_block_node (buffer, node, spc, flags);
break;
default:
NIY;
}
if (is_stmt && is_expr)
pp_semicolon (buffer);
/* If we're building a diagnostic, the formatted text will be written
into BUFFER's stream by the caller; otherwise, write it now. */
if (!(flags & TDF_DIAGNOSTIC))
pp_write_text_to_stream (buffer);
return spc;
}
/* Print the declaration of a variable. */
void
print_declaration (pretty_printer *buffer, tree t, int spc, int flags)
{
INDENT (spc);
if (TREE_CODE (t) == TYPE_DECL)
pp_string (buffer, "typedef ");
if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t))
pp_string (buffer, "register ");
if (TREE_PUBLIC (t) && DECL_EXTERNAL (t))
pp_string (buffer, "extern ");
else if (TREE_STATIC (t))
pp_string (buffer, "static ");
/* Print the type and name. */
if (TREE_TYPE (t) && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
{
tree tmp;
/* Print array's type. */
tmp = TREE_TYPE (t);
while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE)
tmp = TREE_TYPE (tmp);
dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false);
/* Print variable's name. */
pp_space (buffer);
dump_generic_node (buffer, t, spc, flags, false);
/* Print the dimensions. */
tmp = TREE_TYPE (t);
while (TREE_CODE (tmp) == ARRAY_TYPE)
{
dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags);
tmp = TREE_TYPE (tmp);
}
}
else if (TREE_CODE (t) == FUNCTION_DECL)
{
dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false);
pp_space (buffer);
dump_decl_name (buffer, t, flags);
dump_function_declaration (buffer, TREE_TYPE (t), spc, flags);
}
else
{
/* Print type declaration. */
dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false);
/* Print variable's name. */
pp_space (buffer);
dump_generic_node (buffer, t, spc, flags, false);
}
if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t))
{
pp_string (buffer, " __asm__ ");
pp_character (buffer, '(');
dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false);
pp_character (buffer, ')');
}
/* The initial value of a function serves to determine whether the function
is declared or defined. So the following does not apply to function
nodes. */
if (TREE_CODE (t) != FUNCTION_DECL)
{
/* Print the initial value. */
if (DECL_INITIAL (t))
{
pp_space (buffer);
pp_character (buffer, '=');
pp_space (buffer);
dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false);
}
}
if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
{
pp_string (buffer, " [value-expr: ");
dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false);
pp_character (buffer, ']');
}
pp_character (buffer, ';');
}
/* Prints a structure: name, fields, and methods.
FIXME: Still incomplete. */
static void
print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags)
{
/* Print the name of the structure. */
if (TYPE_NAME (node))
{
INDENT (spc);
if (TREE_CODE (node) == RECORD_TYPE)
pp_string (buffer, "struct ");
else if ((TREE_CODE (node) == UNION_TYPE
|| TREE_CODE (node) == QUAL_UNION_TYPE))
pp_string (buffer, "union ");
dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false);
}
/* Print the contents of the structure. */
pp_newline (buffer);
INDENT (spc);
pp_character (buffer, '{');
pp_newline (buffer);
/* Print the fields of the structure. */
{
tree tmp;
tmp = TYPE_FIELDS (node);
while (tmp)
{
/* Avoid printing the structure recursively. */
/* FIXME: Not implemented correctly;
what about the case where there is a cycle in the containment graph? ...
Maybe this could be solved by looking at the scope in which the
structure was declared. */
if (TREE_TYPE (tmp) != node
&& (TREE_CODE (TREE_TYPE (tmp)) != POINTER_TYPE
|| TREE_TYPE (TREE_TYPE (tmp)) != node))
{
print_declaration (buffer, tmp, spc+2, flags);
pp_newline (buffer);
}
tmp = DECL_CHAIN (tmp);
}
}
INDENT (spc);
pp_character (buffer, '}');
}
/* Return the priority of the operator CODE.
From lowest to highest precedence with either left-to-right (L-R)
or right-to-left (R-L) associativity:
1 [L-R] ,
2 [R-L] = += -= *= /= %= &= ^= |= <<= >>=
3 [R-L] ?:
4 [L-R] ||
5 [L-R] &&
6 [L-R] |
7 [L-R] ^
8 [L-R] &
9 [L-R] == !=
10 [L-R] < <= > >=
11 [L-R] << >>
12 [L-R] + -
13 [L-R] * / %
14 [R-L] ! ~ ++ -- + - * & (type) sizeof
15 [L-R] fn() [] -> .
unary +, - and * have higher precedence than the corresponding binary
operators. */
int
op_code_prio (enum tree_code code)
{
switch (code)
{
case TREE_LIST:
case COMPOUND_EXPR:
case BIND_EXPR:
return 1;
case MODIFY_EXPR:
case INIT_EXPR:
return 2;
case COND_EXPR:
return 3;
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
return 4;
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
return 5;
case BIT_IOR_EXPR:
return 6;
case BIT_XOR_EXPR:
case TRUTH_XOR_EXPR:
return 7;
case BIT_AND_EXPR:
return 8;
case EQ_EXPR:
case NE_EXPR:
return 9;
case UNLT_EXPR:
case UNLE_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
case LTGT_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
return 10;
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
case VEC_WIDEN_LSHIFT_HI_EXPR:
case VEC_WIDEN_LSHIFT_LO_EXPR:
case WIDEN_LSHIFT_EXPR:
return 11;
case WIDEN_SUM_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
case MINUS_EXPR:
return 12;
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case WIDEN_MULT_EXPR:
case DOT_PROD_EXPR:
case WIDEN_MULT_PLUS_EXPR:
case WIDEN_MULT_MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
case FMA_EXPR:
return 13;
case TRUTH_NOT_EXPR:
case BIT_NOT_EXPR:
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case NEGATE_EXPR:
case INDIRECT_REF:
case ADDR_EXPR:
case FLOAT_EXPR:
CASE_CONVERT:
case FIX_TRUNC_EXPR:
case TARGET_EXPR:
return 14;
case CALL_EXPR:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case COMPONENT_REF:
return 15;
/* Special expressions. */
case MIN_EXPR:
case MAX_EXPR:
case ABS_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
case REDUC_MAX_EXPR:
case REDUC_MIN_EXPR:
case REDUC_PLUS_EXPR:
case VEC_LSHIFT_EXPR:
case VEC_RSHIFT_EXPR:
case VEC_UNPACK_HI_EXPR:
case VEC_UNPACK_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
case VEC_UNPACK_FLOAT_LO_EXPR:
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_SAT_EXPR:
return 16;
default:
/* Return an arbitrarily high precedence to avoid surrounding single
VAR_DECLs in ()s. */
return 9999;
}
}
/* Return the priority of the operator OP. */
int
op_prio (const_tree op)
{
enum tree_code code;
if (op == NULL)
return 9999;
code = TREE_CODE (op);
if (code == SAVE_EXPR || code == NON_LVALUE_EXPR)
return op_prio (TREE_OPERAND (op, 0));
return op_code_prio (code);
}
/* Return the symbol associated with operator CODE. */
const char *
op_symbol_code (enum tree_code code)
{
switch (code)
{
case MODIFY_EXPR:
return "=";
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
return "||";
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
return "&&";
case BIT_IOR_EXPR:
return "|";
case TRUTH_XOR_EXPR:
case BIT_XOR_EXPR:
return "^";
case ADDR_EXPR:
case BIT_AND_EXPR:
return "&";
case ORDERED_EXPR:
return "ord";
case UNORDERED_EXPR:
return "unord";
case EQ_EXPR:
return "==";
case UNEQ_EXPR:
return "u==";
case NE_EXPR:
return "!=";
case LT_EXPR:
return "<";
case UNLT_EXPR:
return "u<";
case LE_EXPR:
return "<=";
case UNLE_EXPR:
return "u<=";
case GT_EXPR:
return ">";
case UNGT_EXPR:
return "u>";
case GE_EXPR:
return ">=";
case UNGE_EXPR:
return "u>=";
case LTGT_EXPR:
return "<>";
case LSHIFT_EXPR:
return "<<";
case RSHIFT_EXPR:
return ">>";
case LROTATE_EXPR:
return "r<<";
case RROTATE_EXPR:
return "r>>";
case VEC_LSHIFT_EXPR:
return "v<<";
case VEC_RSHIFT_EXPR:
return "v>>";
case WIDEN_LSHIFT_EXPR:
return "w<<";
case POINTER_PLUS_EXPR:
return "+";
case PLUS_EXPR:
return "+";
case REDUC_PLUS_EXPR:
return "r+";
case WIDEN_SUM_EXPR:
return "w+";
case WIDEN_MULT_EXPR:
return "w*";
case NEGATE_EXPR:
case MINUS_EXPR:
return "-";
case BIT_NOT_EXPR:
return "~";
case TRUTH_NOT_EXPR:
return "!";
case MULT_EXPR:
case INDIRECT_REF:
return "*";
case TRUNC_DIV_EXPR:
case RDIV_EXPR:
return "/";
case CEIL_DIV_EXPR:
return "/[cl]";
case FLOOR_DIV_EXPR:
return "/[fl]";
case ROUND_DIV_EXPR:
return "/[rd]";
case EXACT_DIV_EXPR:
return "/[ex]";
case TRUNC_MOD_EXPR:
return "%";
case CEIL_MOD_EXPR:
return "%[cl]";
case FLOOR_MOD_EXPR:
return "%[fl]";
case ROUND_MOD_EXPR:
return "%[rd]";
case PREDECREMENT_EXPR:
return " --";
case PREINCREMENT_EXPR:
return " ++";
case POSTDECREMENT_EXPR:
return "-- ";
case POSTINCREMENT_EXPR:
return "++ ";
case MAX_EXPR:
return "max";
case MIN_EXPR:
return "min";
default:
return "<<< ??? >>>";
}
}
/* Return the symbol associated with operator OP. */
static const char *
op_symbol (const_tree op)
{
return op_symbol_code (TREE_CODE (op));
}
/* Prints the name of a call. NODE is the CALL_EXPR_FN of a CALL_EXPR or
the gimple_call_fn of a GIMPLE_CALL. */
void
print_call_name (pretty_printer *buffer, tree node, int flags)
{
tree op0 = node;
if (TREE_CODE (op0) == NON_LVALUE_EXPR)
op0 = TREE_OPERAND (op0, 0);
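/* Peel off wrappers such as ADDR_EXPR, INDIRECT_REF, NOP_EXPR and
zero-offset MEM_REFs by looping back to this label until a printable
callee remains.  */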
again:
switch (TREE_CODE (op0))
{
case VAR_DECL:
case PARM_DECL:
case FUNCTION_DECL:
dump_function_name (buffer, op0, flags);
break;
case ADDR_EXPR:
case INDIRECT_REF:
case NOP_EXPR:
op0 = TREE_OPERAND (op0, 0);
goto again;
case COND_EXPR:
pp_string (buffer, "(");
dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, flags, false);
pp_string (buffer, ") ? ");
dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, flags, false);
pp_string (buffer, " : ");
dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, flags, false);
break;
case ARRAY_REF:
if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
dump_function_name (buffer, TREE_OPERAND (op0, 0), flags);
else
dump_generic_node (buffer, op0, 0, flags, false);
break;
case MEM_REF:
if (integer_zerop (TREE_OPERAND (op0, 1)))
{
op0 = TREE_OPERAND (op0, 0);
goto again;
}
/* Fallthru. */
case COMPONENT_REF:
case SSA_NAME:
case OBJ_TYPE_REF:
dump_generic_node (buffer, op0, 0, flags, false);
break;
default:
NIY;
}
}
/* Prints the string STR, escaping new-lines as '\n', tabs as '\t', etc. */
static void
pretty_print_string (pretty_printer *buffer, const char *str)
{
if (str == NULL)
return;
while (*str)
{
switch (str[0])
{
case '\b':
pp_string (buffer, "\\b");
break;
case '\f':
pp_string (buffer, "\\f");
break;
case '\n':
pp_string (buffer, "\\n");
break;
case '\r':
pp_string (buffer, "\\r");
break;
case '\t':
pp_string (buffer, "\\t");
break;
case '\v':
pp_string (buffer, "\\v");
break;
case '\\':
pp_string (buffer, "\\\\");
break;
case '\"':
pp_string (buffer, "\\\"");
break;
case '\'':
pp_string (buffer, "\\'");
break;
/* No need to handle \0; the loop terminates on \0. */
case '\1':
pp_string (buffer, "\\1");
break;
case '\2':
pp_string (buffer, "\\2");
break;
case '\3':
pp_string (buffer, "\\3");
break;
case '\4':
pp_string (buffer, "\\4");
break;
case '\5':
pp_string (buffer, "\\5");
break;
case '\6':
pp_string (buffer, "\\6");
break;
case '\7':
pp_string (buffer, "\\7");
break;
default:
pp_character (buffer, str[0]);
break;
}
str++;
}
}
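/* Lazily construct the global pretty-printer BUFFER and direct its output
to FILE.  */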
static void
maybe_init_pretty_print (FILE *file)
{
if (!initialized)
{
pp_construct (&buffer, /* prefix */NULL, /* line-width */0);
pp_needs_newline (&buffer) = true;
pp_translate_identifiers (&buffer) = false;
initialized = 1;
}
buffer.buffer->stream = file;
}
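/* Emit a newline on BUFFER and indent the following line by SPC spaces.  */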
static void
newline_and_indent (pretty_printer *buffer, int spc)
{
pp_newline (buffer);
INDENT (spc);
}
/* Handle a %K format for TEXT. Separate from default_tree_printer so
it can also be used in front ends.
%K: a statement, from which EXPR_LOCATION and TREE_BLOCK will be recorded.
*/
void
percent_K_format (text_info *text)
{
tree t = va_arg (*text->args_ptr, tree), block;
gcc_assert (text->locus != NULL);
*text->locus = EXPR_LOCATION (t);
gcc_assert (pp_ti_abstract_origin (text) != NULL);
block = TREE_BLOCK (t);
*pp_ti_abstract_origin (text) = NULL;
while (block
&& TREE_CODE (block) == BLOCK
&& BLOCK_ABSTRACT_ORIGIN (block))
{
tree ao = BLOCK_ABSTRACT_ORIGIN (block);
while (TREE_CODE (ao) == BLOCK
&& BLOCK_ABSTRACT_ORIGIN (ao)
&& BLOCK_ABSTRACT_ORIGIN (ao) != ao)
ao = BLOCK_ABSTRACT_ORIGIN (ao);
if (TREE_CODE (ao) == FUNCTION_DECL)
{
*pp_ti_abstract_origin (text) = block;
break;
}
block = BLOCK_SUPERCONTEXT (block);
}
}
/* Print the identifier ID to PRETTY-PRINTER. */
void
pp_base_tree_identifier (pretty_printer *pp, tree id)
{
if (pp_translate_identifiers (pp))
{
const char *text = identifier_to_locale (IDENTIFIER_POINTER (id));
pp_append_text (pp, text, text + strlen (text));
}
else
pp_append_text (pp, IDENTIFIER_POINTER (id),
IDENTIFIER_POINTER (id) + IDENTIFIER_LENGTH (id));
}
/* A helper function that is used to dump function information before the
function dump. */
void
dump_function_header (FILE *dump_file, tree fdecl, int flags)
{
const char *dname, *aname;
struct cgraph_node *node = cgraph_get_node (fdecl);
struct function *fun = DECL_STRUCT_FUNCTION (fdecl);
dname = lang_hooks.decl_printable_name (fdecl, 2);
if (DECL_ASSEMBLER_NAME_SET_P (fdecl))
aname = (IDENTIFIER_POINTER
(DECL_ASSEMBLER_NAME (fdecl)));
else
aname = "<unset-asm-name>";
fprintf (dump_file, "\n;; Function %s (%s, funcdef_no=%d",
dname, aname, fun->funcdef_no);
if (!(flags & TDF_NOUID))
fprintf (dump_file, ", decl_uid=%d", DECL_UID (fdecl));
if (node)
{
fprintf (dump_file, ", cgraph_uid=%d)%s\n\n", node->uid,
node->frequency == NODE_FREQUENCY_HOT
? " (hot)"
: node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
? " (unlikely executed)"
: node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
? " (executed once)"
: "");
}
else
fprintf (dump_file, ")\n\n");
}
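/* Illustrative example of the emitted header (not from the original source):
     ;; Function foo (foo, funcdef_no=12, decl_uid=2345, cgraph_uid=7) (hot)
   or, when no cgraph node exists for FDECL:
     ;; Function foo (foo, funcdef_no=12, decl_uid=2345)
*/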
|
GB_unop__minv_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_uint64_uint64)
// op(A') function: GB (_unop_tran__minv_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 64) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 64) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
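// Illustrative usage sketch (not part of the generated kernel): applying the
// operator to a dense uint64_t array of n entries with t threads; the NULL Ab
// argument selects the non-bitmap branch above.
//
//     GrB_Info info = GB (_unop_apply__minv_uint64_uint64) (Cx, Ax, NULL, n, t) ;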
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Main.c | #include "XSbench_header.h"
#include "cudaHeader.h"
#include <limits.h>
// #define ISOTOPES 130 // disabled: redefining ISOTOPES below with a different value is ill-formed
#define ISOTOPES 355
int main( int argc, char* argv[] )
{
// =====================================================================
// Initialization & Command Line Read-In
// =====================================================================
unsigned long seed;
size_t memtotal;
int n_isotopes; // H-M Large is 355, H-M Small is 68
int n_gridpoints = 11303;
// int lookups = 10000;
int lookups = 50000;
// int lookups = 100000;
int i, thread, nthreads, mat;
double omp_start, omp_end, p_energy;
int max_procs = omp_get_num_procs();
char * HM;
int bgq_mode = 0;
int kernelId =0;
// rand() is only used in the serial initialization stages.
// A custom RNG is used in parallel portions.
// srand(time(NULL)); //commented this so that same addresses are generated
// across the runs
srand(INT_MAX);
// Process CLI Fields
// Usage: ./XSBench <# threads> <H-M Size ("Small" or "Large")> <BGQ mode>
// # threads - The number of threads you wish to run
// H-M Size - The problem size (small = 68 nuclides, large = 355 nuclides)
// BGQ Mode - Number of ranks - no real effect, save for stamping the
// results.txt printout
// Note - No arguments are required - default parameters will be used if
// no arguments are given.
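// Example (illustrative): "./XSBench 16 Large 1" runs 16 threads on the
// 355-nuclide problem with CUDA kernel 1; a 4th argument, as in
// "./XSBench 16 Large 1 20000", additionally overrides the gridpoints per nuclide.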
if( argc == 2 )
{
nthreads = atoi(argv[1]); // first arg sets # of threads
n_isotopes = ISOTOPES; // defaults to H-M Large
}
else if( argc == 3 )
{
nthreads = atoi(argv[1]); // first arg sets # of threads
// second arg specifies small or large H-M benchmark
if( strcmp( argv[2], "small") == 0 || strcmp( argv[2], "Small" ) == 0)
n_isotopes = 68;
else
n_isotopes = ISOTOPES;
}
else if( argc == 4 )
{
kernelId = atoi(argv[3]);
nthreads = atoi(argv[1]); // first arg sets # of threads
// second arg specifies small or large H-M benchmark
if( strcmp( argv[2], "small") == 0 || strcmp( argv[2], "Small" ) == 0)
n_isotopes = 68;
else
n_isotopes = ISOTOPES;
}
else if(argc == 5)
{
n_gridpoints = atoi(argv[4]);
kernelId = atoi(argv[3]);
nthreads = atoi(argv[1]); // first arg sets # of threads
// second arg specifies small or large H-M benchmark
if( strcmp( argv[2], "small") == 0 || strcmp( argv[2], "Small" ) == 0)
n_isotopes = 68;
else
n_isotopes = ISOTOPES;
}
else
{
nthreads = max_procs; // defaults to full CPU usage
n_isotopes = ISOTOPES; // defaults to H-M Large
}
// Sets H-M size name
if( n_isotopes == 68 )
HM = "Small";
else
HM = "Large";
// Set number of OpenMP Threads
omp_set_num_threads(nthreads);
// =====================================================================
// Calculate Estimate of Memory Usage
// =====================================================================
size_t single_nuclide_grid = n_gridpoints * sizeof( NuclideGridPoint );
size_t all_nuclide_grids = n_isotopes * single_nuclide_grid;
size_t size_GridPoint =sizeof(GridPoint)+n_isotopes*sizeof(int);
size_t size_UEG = n_isotopes*n_gridpoints * size_GridPoint;
int mem_tot;
memtotal = all_nuclide_grids + size_UEG;
all_nuclide_grids = all_nuclide_grids / 1000000; // bytes -> MB (was / 1048576)
size_UEG = size_UEG / 1000000; // bytes -> MB (was / 1048576)
memtotal = memtotal / 1000000; // bytes -> MB (was / 1048576)
mem_tot = memtotal;
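// Rough illustrative numbers, assuming a 48-byte NuclideGridPoint (six doubles)
// and a 16-byte GridPoint header (neither size is shown in this file): for
// H-M Large with 11303 gridpoints, all_nuclide_grids is ~0.19 GB and size_UEG
// dominates at ~5.8 GB, so the printed estimate lands near 6000 MB.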
// =====================================================================
// Print-out of Input Summary
// =====================================================================
logo();
center_print("INPUT SUMMARY", 79);
border_print();
printf("Materials: %d\n", 12);
printf("H-M Benchmark Size: %s\n", HM);
printf("Total Isotopes: %d\n", n_isotopes);
printf("Gridpoints (per Nuclide): ");
fancy_int(n_gridpoints);
printf("\nUnionized Energy Gridpoints: ");
fancy_int(n_isotopes*n_gridpoints);
printf("\nXS Lookups: ");
fancy_int(lookups);
printf("\nThreads: %d\n", nthreads);
printf("Est. Memory Usage (MB): ");
fancy_int(mem_tot);
printf("\n");
if( EXTRA_FLOPS > 0 )
printf("Extra Flops: %d\n", EXTRA_FLOPS);
if( EXTRA_LOADS > 0 )
printf("Extra Loads: %d\n", EXTRA_LOADS);
border_print();
center_print("\nINITIALIZATION", 79);
border_print();
// =====================================================================
// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
// =====================================================================
// Allocate & fill energy grids
printf("Generating Nuclide Energy Grids...\n");
NuclideGridPoint ** nuclide_grids = gpmatrix( n_isotopes, n_gridpoints );
generate_grids( nuclide_grids, n_isotopes, n_gridpoints );
// Sort grids by energy
sort_nuclide_grids( nuclide_grids, n_isotopes, n_gridpoints );
// Prepare Unionized Energy Grid Framework
GridPoint * energy_grid = generate_energy_grid( n_isotopes, n_gridpoints,
nuclide_grids );
// Double Indexing. Filling in energy_grid with pointers to the
// nuclide_energy_grids.
omp_start = omp_get_wtime();
set_grid_ptrs( energy_grid, nuclide_grids, n_isotopes, n_gridpoints );
omp_end = omp_get_wtime();
printf("Pointer calculation took %f seconds\n", omp_end-omp_start);
// Get material data
printf("Loading Mats...\n");
int *num_nucs = load_num_nucs(n_isotopes);
int **mats = load_mats(num_nucs, n_isotopes);
double **concs = load_concs(num_nucs);
double * results = (double*) malloc(N_ELEMENTS*NUM_RESULTS * sizeof(double));
// =====================================================================
// Cross Section (XS) Parallel Lookup Simulation Begins
// =====================================================================
border_print();
center_print("SIMULATION", 79);
border_print();
omp_start = omp_get_wtime();
#ifdef __PAPI
int eventset = PAPI_NULL;
int num_papi_events;
counter_init(&eventset, &num_papi_events);
#endif
// OpenMP compiler directives - declaring variables as shared or private
#pragma omp parallel default(none) \
private(i, thread, p_energy, mat, seed) \
shared( max_procs, n_isotopes, n_gridpoints, \
energy_grid, nuclide_grids, lookups, nthreads, \
mats, concs, num_nucs, results)
{
double macro_xs_vector[5];
thread = omp_get_thread_num();
seed = (thread+1)*19+17;
#pragma omp for
for( i = 0; i < lookups; i++ )
{
// Status text
if( INFO && thread == 0 && i % 1000 == 0 )
printf("\rCalculating XS's... (%.0lf%% completed)",
i / ( lookups / (double) nthreads ) * 100.0);
#if(STRIP_RANDOM==1)
p_energy = 0.01 + (((double)(i%10))/10.0) + (((double)(i%1000))/1000.0);
p_energy -= ((int)(p_energy));
//p_energy = i/(float)lookups;
mat = i %12;
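// e.g. (illustrative) i = 1234: p_energy = 0.01 + 0.4 + 0.234 = 0.644 (already
// below 1, so subtracting its integer part above changes nothing) and mat = 10.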
#else
// Randomly pick an energy and material for the particle
p_energy = rn(&seed);
mat = pick_mat(&seed);
#endif
// This returns the macro_xs_vector, but we're not going
// to do anything with it in this program, so return value
// is written over.
calculate_macro_xs( p_energy, mat, n_isotopes,
n_gridpoints, num_nucs, concs,
energy_grid, nuclide_grids, mats,
macro_xs_vector );
#if(STRIP_RANDOM ==1)
if( i < NUM_RESULTS)
{
memcpy(&results[5*i], &macro_xs_vector[0], 5*sizeof(double));
}
#endif
}
}
omp_end = omp_get_wtime();
printf("\n" );
printf("Simulation complete.\n" );
double cudaLookupRate = cudaDriver(lookups, n_isotopes, n_gridpoints, 12, num_nucs, energy_grid, concs, nuclide_grids, mats, results, kernelId);
// =====================================================================
// Print / Save Results and Exit
// =====================================================================
border_print();
center_print("RESULTS", 79);
border_print();
// Print the results
printf("Threads: %d\n", nthreads);
if( EXTRA_FLOPS > 0 )
printf("Extra Flops: %d\n", EXTRA_FLOPS);
if( EXTRA_LOADS > 0 )
printf("Extra Loads: %d\n", EXTRA_LOADS);
printf("Runtime: %.3lf seconds\n", omp_end-omp_start);
printf("Lookups: "); fancy_int(lookups);
double cpuLookupRate = ((double) lookups / (omp_end-omp_start));
printf("\n");
printf("CPU %d threads \tLookups/s: ", nthreads);
fancy_int((int) cpuLookupRate);
printf("\n");
printf("CUDA Port \tLookups/s: ");
fancy_int((int)cudaLookupRate);
printf("\t %.2fX CPU\n", (cudaLookupRate/cpuLookupRate));
border_print();
// For benchmarking, output lookup/s data to file
if( SAVE )
{
FILE * out = fopen( "results.txt", "a" );
fprintf(out, "c%d\t%d\t%.0lf\n", bgq_mode, nthreads,
(double) lookups / (omp_end-omp_start));
fclose(out);
}
#ifdef __PAPI
counter_stop(&eventset, num_papi_events);
#endif
free(results);
return 0;
}
|
convolutiondepthwise_5x5_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
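// Reference semantics (illustrative sketch only; the optimized code below does
// not call this): for each group g, output pixel (i, j) and packed fp16 lane c,
// the stride-1 5x5 depthwise convolution computes
//
//   out(g, i, j)[c] = bias(g)[c] + sum over ky, kx in [0, 5) of
//                     in(g, i + ky, j + kx)[c] * k(g, ky, kx)[c]
//
// The NEON assembly below evaluates this with 8 fp16 lanes per vector,
// producing up to two output rows and four output columns per inner iteration.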
static void convdw5x5s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__fp16 bias0_data[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* k0 = kernel.row<const __fp16>(g);
__fp16* outptr0 = out.row<__fp16>(0);
__fp16* outptr1 = out.row<__fp16>(1);
const Mat img0 = bottom_blob.channel(g);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
const __fp16* r4 = img0.row<const __fp16>(4);
const __fp16* r5 = img0.row<const __fp16>(5);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
const __fp16* bias0_data_ptr = bias ? bias + g * 8 : bias0_data;
asm volatile(
"prfm pldl1keep, [%18, #512] \n"
"ld1 {v31.8h}, [%18] \n" // sum13
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" // r0_0123
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w0_0123
"mov v24.16b, v31.16b \n" // sum00
"mov v25.16b, v31.16b \n" // sum01
"mov v26.16b, v31.16b \n" // sum02
"mov v27.16b, v31.16b \n" // sum03
"fmla v24.8h, v16.8h, v0.8h \n"
"fmla v25.8h, v17.8h, v0.8h \n"
"fmla v26.8h, v18.8h, v0.8h \n"
"fmla v27.8h, v19.8h, v0.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2] \n" // r0_4567
"fmla v24.8h, v17.8h, v1.8h \n"
"fmla v25.8h, v18.8h, v1.8h \n"
"fmla v26.8h, v19.8h, v1.8h \n"
"fmla v27.8h, v20.8h, v1.8h \n"
"mov v28.16b, v31.16b \n" // sum10
"fmla v24.8h, v18.8h, v2.8h \n"
"fmla v25.8h, v19.8h, v2.8h \n"
"fmla v26.8h, v20.8h, v2.8h \n"
"fmla v27.8h, v21.8h, v2.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w04 w1_012
"fmla v24.8h, v19.8h, v3.8h \n"
"fmla v25.8h, v20.8h, v3.8h \n"
"fmla v26.8h, v21.8h, v3.8h \n"
"fmla v27.8h, v22.8h, v3.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // r1_0123
"fmla v24.8h, v20.8h, v4.8h \n"
"fmla v25.8h, v21.8h, v4.8h \n"
"fmla v26.8h, v22.8h, v4.8h \n"
"fmla v27.8h, v23.8h, v4.8h \n"
"mov v29.16b, v31.16b \n" // sum11
"mov v30.16b, v31.16b \n" // sum12
"fmla v28.8h, v8.8h, v0.8h \n"
"fmla v29.8h, v9.8h, v0.8h \n"
"fmla v30.8h, v10.8h, v0.8h \n"
"fmla v31.8h, v11.8h, v0.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3] \n" // r1_4567
"fmla v28.8h, v9.8h, v1.8h \n"
"fmla v29.8h, v10.8h, v1.8h \n"
"fmla v30.8h, v11.8h, v1.8h \n"
"fmla v31.8h, v12.8h, v1.8h \n"
"fmla v28.8h, v10.8h, v2.8h \n"
"fmla v29.8h, v11.8h, v2.8h \n"
"fmla v30.8h, v12.8h, v2.8h \n"
"fmla v31.8h, v13.8h, v2.8h \n"
"fmla v28.8h, v11.8h, v3.8h \n"
"fmla v29.8h, v12.8h, v3.8h \n"
"fmla v30.8h, v13.8h, v3.8h \n"
"fmla v31.8h, v14.8h, v3.8h \n"
"fmla v28.8h, v12.8h, v4.8h \n"
"fmla v29.8h, v13.8h, v4.8h \n"
"fmla v30.8h, v14.8h, v4.8h \n"
"fmla v31.8h, v15.8h, v4.8h \n"
"fmla v24.8h, v8.8h, v5.8h \n"
"fmla v25.8h, v9.8h, v5.8h \n"
"fmla v26.8h, v10.8h, v5.8h \n"
"fmla v27.8h, v11.8h, v5.8h \n"
"fmla v24.8h, v9.8h, v6.8h \n"
"fmla v25.8h, v10.8h, v6.8h \n"
"fmla v26.8h, v11.8h, v6.8h \n"
"fmla v27.8h, v12.8h, v6.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w1_34 w2_01
"fmla v24.8h, v10.8h, v7.8h \n"
"fmla v25.8h, v11.8h, v7.8h \n"
"fmla v26.8h, v12.8h, v7.8h \n"
"fmla v27.8h, v13.8h, v7.8h \n"
"fmla v24.8h, v11.8h, v0.8h \n"
"fmla v25.8h, v12.8h, v0.8h \n"
"fmla v26.8h, v13.8h, v0.8h \n"
"fmla v27.8h, v14.8h, v0.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" // r2_0123
"fmla v24.8h, v12.8h, v1.8h \n"
"fmla v25.8h, v13.8h, v1.8h \n"
"fmla v26.8h, v14.8h, v1.8h \n"
"fmla v27.8h, v15.8h, v1.8h \n"
"fmla v28.8h, v16.8h, v5.8h \n"
"fmla v29.8h, v17.8h, v5.8h \n"
"fmla v30.8h, v18.8h, v5.8h \n"
"fmla v31.8h, v19.8h, v5.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" // r2_4567
"fmla v28.8h, v17.8h, v6.8h \n"
"fmla v29.8h, v18.8h, v6.8h \n"
"fmla v30.8h, v19.8h, v6.8h \n"
"fmla v31.8h, v20.8h, v6.8h \n"
"fmla v28.8h, v18.8h, v7.8h \n"
"fmla v29.8h, v19.8h, v7.8h \n"
"fmla v30.8h, v20.8h, v7.8h \n"
"fmla v31.8h, v21.8h, v7.8h \n"
"fmla v28.8h, v19.8h, v0.8h \n"
"fmla v29.8h, v20.8h, v0.8h \n"
"fmla v30.8h, v21.8h, v0.8h \n"
"fmla v31.8h, v22.8h, v0.8h \n"
"fmla v28.8h, v20.8h, v1.8h \n"
"fmla v29.8h, v21.8h, v1.8h \n"
"fmla v30.8h, v22.8h, v1.8h \n"
"fmla v31.8h, v23.8h, v1.8h \n"
"fmla v24.8h, v16.8h, v2.8h \n"
"fmla v25.8h, v17.8h, v2.8h \n"
"fmla v26.8h, v18.8h, v2.8h \n"
"fmla v27.8h, v19.8h, v2.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w2_234 w30
"fmla v24.8h, v17.8h, v3.8h \n"
"fmla v25.8h, v18.8h, v3.8h \n"
"fmla v26.8h, v19.8h, v3.8h \n"
"fmla v27.8h, v20.8h, v3.8h \n"
"fmla v24.8h, v18.8h, v4.8h \n"
"fmla v25.8h, v19.8h, v4.8h \n"
"fmla v26.8h, v20.8h, v4.8h \n"
"fmla v27.8h, v21.8h, v4.8h \n"
"fmla v24.8h, v19.8h, v5.8h \n"
"fmla v25.8h, v20.8h, v5.8h \n"
"fmla v26.8h, v21.8h, v5.8h \n"
"fmla v27.8h, v22.8h, v5.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%5], #64 \n" // r3_0123
"fmla v24.8h, v20.8h, v6.8h \n"
"fmla v25.8h, v21.8h, v6.8h \n"
"fmla v26.8h, v22.8h, v6.8h \n"
"fmla v27.8h, v23.8h, v6.8h \n"
"fmla v28.8h, v8.8h, v2.8h \n"
"fmla v29.8h, v9.8h, v2.8h \n"
"fmla v30.8h, v10.8h, v2.8h \n"
"fmla v31.8h, v11.8h, v2.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%5] \n" // r3_4567
"fmla v28.8h, v9.8h, v3.8h \n"
"fmla v29.8h, v10.8h, v3.8h \n"
"fmla v30.8h, v11.8h, v3.8h \n"
"fmla v31.8h, v12.8h, v3.8h \n"
"fmla v28.8h, v10.8h, v4.8h \n"
"fmla v29.8h, v11.8h, v4.8h \n"
"fmla v30.8h, v12.8h, v4.8h \n"
"fmla v31.8h, v13.8h, v4.8h \n"
"fmla v28.8h, v11.8h, v5.8h \n"
"fmla v29.8h, v12.8h, v5.8h \n"
"fmla v30.8h, v13.8h, v5.8h \n"
"fmla v31.8h, v14.8h, v5.8h \n"
"fmla v28.8h, v12.8h, v6.8h \n"
"fmla v29.8h, v13.8h, v6.8h \n"
"fmla v30.8h, v14.8h, v6.8h \n"
"fmla v31.8h, v15.8h, v6.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w3_1234
"fmla v24.8h, v8.8h, v7.8h \n"
"fmla v25.8h, v9.8h, v7.8h \n"
"fmla v26.8h, v10.8h, v7.8h \n"
"fmla v27.8h, v11.8h, v7.8h \n"
"fmla v24.8h, v9.8h, v0.8h \n"
"fmla v25.8h, v10.8h, v0.8h \n"
"fmla v26.8h, v11.8h, v0.8h \n"
"fmla v27.8h, v12.8h, v0.8h \n"
"fmla v24.8h, v10.8h, v1.8h \n"
"fmla v25.8h, v11.8h, v1.8h \n"
"fmla v26.8h, v12.8h, v1.8h \n"
"fmla v27.8h, v13.8h, v1.8h \n"
"fmla v24.8h, v11.8h, v2.8h \n"
"fmla v25.8h, v12.8h, v2.8h \n"
"fmla v26.8h, v13.8h, v2.8h \n"
"fmla v27.8h, v14.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" // r4_0123
"fmla v24.8h, v12.8h, v3.8h \n"
"fmla v25.8h, v13.8h, v3.8h \n"
"fmla v26.8h, v14.8h, v3.8h \n"
"fmla v27.8h, v15.8h, v3.8h \n"
"fmla v28.8h, v16.8h, v7.8h \n"
"fmla v29.8h, v17.8h, v7.8h \n"
"fmla v30.8h, v18.8h, v7.8h \n"
"fmla v31.8h, v19.8h, v7.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" // r4_4567
"fmla v28.8h, v17.8h, v0.8h \n"
"fmla v29.8h, v18.8h, v0.8h \n"
"fmla v30.8h, v19.8h, v0.8h \n"
"fmla v31.8h, v20.8h, v0.8h \n"
"fmla v28.8h, v18.8h, v1.8h \n"
"fmla v29.8h, v19.8h, v1.8h \n"
"fmla v30.8h, v20.8h, v1.8h \n"
"fmla v31.8h, v21.8h, v1.8h \n"
"fmla v28.8h, v19.8h, v2.8h \n"
"fmla v29.8h, v20.8h, v2.8h \n"
"fmla v30.8h, v21.8h, v2.8h \n"
"fmla v31.8h, v22.8h, v2.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w4_0123
"fmla v28.8h, v20.8h, v3.8h \n"
"fmla v29.8h, v21.8h, v3.8h \n"
"fmla v30.8h, v22.8h, v3.8h \n"
"fmla v31.8h, v23.8h, v3.8h \n"
"fmla v24.8h, v16.8h, v4.8h \n"
"fmla v25.8h, v17.8h, v4.8h \n"
"fmla v26.8h, v18.8h, v4.8h \n"
"fmla v27.8h, v19.8h, v4.8h \n"
"fmla v24.8h, v17.8h, v5.8h \n"
"fmla v25.8h, v18.8h, v5.8h \n"
"fmla v26.8h, v19.8h, v5.8h \n"
"fmla v27.8h, v20.8h, v5.8h \n"
"fmla v24.8h, v18.8h, v6.8h \n"
"fmla v25.8h, v19.8h, v6.8h \n"
"fmla v26.8h, v20.8h, v6.8h \n"
"fmla v27.8h, v21.8h, v6.8h \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v0.8h}, [%8] \n" // w44
"fmla v24.8h, v19.8h, v7.8h \n"
"fmla v25.8h, v20.8h, v7.8h \n"
"fmla v26.8h, v21.8h, v7.8h \n"
"fmla v27.8h, v22.8h, v7.8h \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%7], #64 \n" // r5_0123
"fmla v24.8h, v20.8h, v0.8h \n"
"fmla v25.8h, v21.8h, v0.8h \n"
"fmla v26.8h, v22.8h, v0.8h \n"
"fmla v27.8h, v23.8h, v0.8h \n"
"fmla v28.8h, v8.8h, v4.8h \n"
"fmla v29.8h, v9.8h, v4.8h \n"
"fmla v30.8h, v10.8h, v4.8h \n"
"fmla v31.8h, v11.8h, v4.8h \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%7] \n" // r5_4567
"fmla v28.8h, v9.8h, v5.8h \n"
"fmla v29.8h, v10.8h, v5.8h \n"
"fmla v30.8h, v11.8h, v5.8h \n"
"fmla v31.8h, v12.8h, v5.8h \n"
"fmla v28.8h, v10.8h, v6.8h \n"
"fmla v29.8h, v11.8h, v6.8h \n"
"fmla v30.8h, v12.8h, v6.8h \n"
"fmla v31.8h, v13.8h, v6.8h \n"
"fmla v28.8h, v11.8h, v7.8h \n"
"fmla v29.8h, v12.8h, v7.8h \n"
"fmla v30.8h, v13.8h, v7.8h \n"
"fmla v31.8h, v14.8h, v7.8h \n"
"fmla v28.8h, v12.8h, v0.8h \n"
"fmla v29.8h, v13.8h, v0.8h \n"
"fmla v30.8h, v14.8h, v0.8h \n"
"fmla v31.8h, v15.8h, v0.8h \n"
"sub %8, %8, #384 \n" // k0 -= 24 * 8
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(k0) // %8
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(k0),
"r"(bias0_data_ptr) // %18
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.8h, v17.8h}, [%2], #32 \n" // r0_01
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w0_0123
"mov v28.16b, %18.16b \n" // sum00
"mov v29.16b, %18.16b \n" // sum01
"fmla v28.8h, v16.8h, v0.8h \n"
"fmla v29.8h, v17.8h, v0.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%2] \n" // r0_2345
"mov v30.16b, %18.16b \n" // sum10
"mov v31.16b, %18.16b \n" // sum11
"fmla v28.8h, v17.8h, v1.8h \n"
"fmla v29.8h, v18.8h, v1.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w04 w1_012
"fmla v28.8h, v18.8h, v2.8h \n"
"fmla v29.8h, v19.8h, v2.8h \n"
"fmla v28.8h, v19.8h, v3.8h \n"
"fmla v29.8h, v20.8h, v3.8h \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v22.8h, v23.8h}, [%3], #32 \n" // r1_01
"fmla v28.8h, v20.8h, v4.8h \n"
"fmla v29.8h, v21.8h, v4.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%3] \n" // r1_2345
"fmla v30.8h, v22.8h, v0.8h \n"
"fmla v31.8h, v23.8h, v0.8h \n"
"fmla v30.8h, v23.8h, v1.8h \n"
"fmla v31.8h, v24.8h, v1.8h \n"
"fmla v30.8h, v24.8h, v2.8h \n"
"fmla v31.8h, v25.8h, v2.8h \n"
"fmla v30.8h, v25.8h, v3.8h \n"
"fmla v31.8h, v26.8h, v3.8h \n"
"fmla v30.8h, v26.8h, v4.8h \n"
"fmla v31.8h, v27.8h, v4.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w1_34 w2_01
"fmla v28.8h, v22.8h, v5.8h \n"
"fmla v29.8h, v23.8h, v5.8h \n"
"fmla v28.8h, v23.8h, v6.8h \n"
"fmla v29.8h, v24.8h, v6.8h \n"
"fmla v28.8h, v24.8h, v7.8h \n"
"fmla v29.8h, v25.8h, v7.8h \n"
"fmla v28.8h, v25.8h, v0.8h \n"
"fmla v29.8h, v26.8h, v0.8h \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.8h, v17.8h}, [%4], #32 \n" // r2_01
"fmla v28.8h, v26.8h, v1.8h \n"
"fmla v29.8h, v27.8h, v1.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%4] \n" // r2_2345
"fmla v30.8h, v16.8h, v5.8h \n"
"fmla v31.8h, v17.8h, v5.8h \n"
"fmla v30.8h, v17.8h, v6.8h \n"
"fmla v31.8h, v18.8h, v6.8h \n"
"fmla v30.8h, v18.8h, v7.8h \n"
"fmla v31.8h, v19.8h, v7.8h \n"
"fmla v30.8h, v19.8h, v0.8h \n"
"fmla v31.8h, v20.8h, v0.8h \n"
"fmla v30.8h, v20.8h, v1.8h \n"
"fmla v31.8h, v21.8h, v1.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w2_234 w30
"fmla v28.8h, v16.8h, v2.8h \n"
"fmla v29.8h, v17.8h, v2.8h \n"
"fmla v28.8h, v17.8h, v3.8h \n"
"fmla v29.8h, v18.8h, v3.8h \n"
"fmla v28.8h, v18.8h, v4.8h \n"
"fmla v29.8h, v19.8h, v4.8h \n"
"fmla v28.8h, v19.8h, v5.8h \n"
"fmla v29.8h, v20.8h, v5.8h \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v22.8h, v23.8h}, [%5], #32 \n" // r3_01
"fmla v28.8h, v20.8h, v6.8h \n"
"fmla v29.8h, v21.8h, v6.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%5] \n" // r3_2345
"fmla v30.8h, v22.8h, v2.8h \n"
"fmla v31.8h, v23.8h, v2.8h \n"
"fmla v30.8h, v23.8h, v3.8h \n"
"fmla v31.8h, v24.8h, v3.8h \n"
"fmla v30.8h, v24.8h, v4.8h \n"
"fmla v31.8h, v25.8h, v4.8h \n"
"fmla v30.8h, v25.8h, v5.8h \n"
"fmla v31.8h, v26.8h, v5.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w3_1234
"fmla v30.8h, v26.8h, v6.8h \n"
"fmla v31.8h, v27.8h, v6.8h \n"
"fmla v28.8h, v22.8h, v7.8h \n"
"fmla v29.8h, v23.8h, v7.8h \n"
"fmla v28.8h, v23.8h, v0.8h \n"
"fmla v29.8h, v24.8h, v0.8h \n"
"fmla v28.8h, v24.8h, v1.8h \n"
"fmla v29.8h, v25.8h, v1.8h \n"
"fmla v28.8h, v25.8h, v2.8h \n"
"fmla v29.8h, v26.8h, v2.8h \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.8h, v17.8h}, [%6], #32 \n" // r4_01
"fmla v28.8h, v26.8h, v3.8h \n"
"fmla v29.8h, v27.8h, v3.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%6] \n" // r4_2345
"fmla v30.8h, v16.8h, v7.8h \n"
"fmla v31.8h, v17.8h, v7.8h \n"
"fmla v30.8h, v17.8h, v0.8h \n"
"fmla v31.8h, v18.8h, v0.8h \n"
"fmla v30.8h, v18.8h, v1.8h \n"
"fmla v31.8h, v19.8h, v1.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w4_0123
"fmla v30.8h, v19.8h, v2.8h \n"
"fmla v31.8h, v20.8h, v2.8h \n"
"fmla v30.8h, v20.8h, v3.8h \n"
"fmla v31.8h, v21.8h, v3.8h \n"
"fmla v28.8h, v16.8h, v4.8h \n"
"fmla v29.8h, v17.8h, v4.8h \n"
"fmla v28.8h, v17.8h, v5.8h \n"
"fmla v29.8h, v18.8h, v5.8h \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v0.8h}, [%8] \n" // w44
"fmla v28.8h, v18.8h, v6.8h \n"
"fmla v29.8h, v19.8h, v6.8h \n"
"fmla v28.8h, v19.8h, v7.8h \n"
"fmla v29.8h, v20.8h, v7.8h \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v22.8h, v23.8h}, [%7], #32 \n" // r5_01
"fmla v28.8h, v20.8h, v0.8h \n"
"fmla v29.8h, v21.8h, v0.8h \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%7] \n" // r5_2345
"fmla v30.8h, v22.8h, v4.8h \n"
"fmla v31.8h, v23.8h, v4.8h \n"
"fmla v30.8h, v23.8h, v5.8h \n"
"fmla v31.8h, v24.8h, v5.8h \n"
"fmla v30.8h, v24.8h, v6.8h \n"
"fmla v31.8h, v25.8h, v6.8h \n"
"fmla v30.8h, v25.8h, v7.8h \n"
"fmla v31.8h, v26.8h, v7.8h \n"
"fmla v30.8h, v26.8h, v0.8h \n"
"fmla v31.8h, v27.8h, v0.8h \n"
"sub %8, %8, #384 \n" // k0 -= 24 * 8
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
"st1 {v30.8h, v31.8h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(k0) // %8
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(k0),
"w"(_bias0) // %18
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.8h}, [%2], #16 \n" // r0_0
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%2] \n" // r0_1234
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w0_0123
"mov v30.16b, %18.16b \n" // sum00
"mov v31.16b, %18.16b \n" // sum10
"fmla v30.8h, v16.8h, v0.8h \n"
"fmla v30.8h, v17.8h, v1.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w04 w1_012
"fmla v30.8h, v18.8h, v2.8h \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v21.8h}, [%3], #16 \n" // r1_0
"fmla v30.8h, v19.8h, v3.8h \n"
"fmla v30.8h, v20.8h, v4.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%3] \n" // r1_1234
"fmla v31.8h, v21.8h, v0.8h \n"
"fmla v31.8h, v22.8h, v1.8h \n"
"fmla v31.8h, v23.8h, v2.8h \n"
"fmla v31.8h, v24.8h, v3.8h \n"
"fmla v31.8h, v25.8h, v4.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w1_34 w2_01
"fmla v30.8h, v21.8h, v5.8h \n"
"fmla v30.8h, v22.8h, v6.8h \n"
"fmla v30.8h, v23.8h, v7.8h \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.8h}, [%4], #16 \n" // r2_0
"fmla v30.8h, v24.8h, v0.8h \n"
"fmla v30.8h, v25.8h, v1.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%4] \n" // r2_1234
"fmla v31.8h, v16.8h, v5.8h \n"
"fmla v31.8h, v17.8h, v6.8h \n"
"fmla v31.8h, v18.8h, v7.8h \n"
"fmla v31.8h, v19.8h, v0.8h \n"
"fmla v31.8h, v20.8h, v1.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w2_234 w30
"fmla v30.8h, v16.8h, v2.8h \n"
"fmla v30.8h, v17.8h, v3.8h \n"
"fmla v30.8h, v18.8h, v4.8h \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.8h}, [%5], #16 \n" // r3_0
"fmla v30.8h, v19.8h, v5.8h \n"
"fmla v30.8h, v20.8h, v6.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%5] \n" // r3_1234
"fmla v31.8h, v21.8h, v2.8h \n"
"fmla v31.8h, v22.8h, v3.8h \n"
"fmla v31.8h, v23.8h, v4.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w3_1234
"fmla v31.8h, v24.8h, v5.8h \n"
"fmla v31.8h, v25.8h, v6.8h \n"
"fmla v30.8h, v21.8h, v7.8h \n"
"fmla v30.8h, v22.8h, v0.8h \n"
"fmla v30.8h, v23.8h, v1.8h \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v16.8h}, [%6], #16 \n" // r4_0
"fmla v30.8h, v24.8h, v2.8h \n"
"fmla v30.8h, v25.8h, v3.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%6] \n" // r4_1234
"fmla v31.8h, v16.8h, v7.8h \n"
"fmla v31.8h, v17.8h, v0.8h \n"
"fmla v31.8h, v18.8h, v1.8h \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w4_0123
"fmla v31.8h, v19.8h, v2.8h \n"
"fmla v31.8h, v20.8h, v3.8h \n"
"fmla v30.8h, v16.8h, v4.8h \n"
"fmla v30.8h, v17.8h, v5.8h \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v0.8h}, [%8] \n" // w44
"fmla v30.8h, v18.8h, v6.8h \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v21.8h}, [%7], #16 \n" // r5_0
"fmla v30.8h, v19.8h, v7.8h \n"
"fmla v30.8h, v20.8h, v0.8h \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%7] \n" // r5_1234
"fmla v31.8h, v21.8h, v4.8h \n"
"fmla v31.8h, v22.8h, v5.8h \n"
"fmla v31.8h, v23.8h, v6.8h \n"
"fmla v31.8h, v24.8h, v7.8h \n"
"fmla v31.8h, v25.8h, v0.8h \n"
"sub %8, %8, #384 \n" // k0 -= 24 * 8
"st1 {v30.8h}, [%0], #16 \n"
"st1 {v31.8h}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(k0) // %8
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(k0),
"w"(_bias0) // %18
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v30", "v31");
}
r0 += 4 * 8 + w * 8;
r1 += 4 * 8 + w * 8;
r2 += 4 * 8 + w * 8;
r3 += 4 * 8 + w * 8;
r4 += 4 * 8 + w * 8;
r5 += 4 * 8 + w * 8;
outptr0 += outw * 8;
outptr1 += outw * 8;
}
float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r0_0123
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w0_0123
"mov v28.16b, %14.16b \n" // sum00
"mov v29.16b, %14.16b \n" // sum01
"mov v30.16b, %14.16b \n" // sum02
"mov v31.16b, %14.16b \n" // sum03
"fmla v28.8h, v12.8h, v0.8h \n"
"fmla v29.8h, v13.8h, v0.8h \n"
"fmla v30.8h, v14.8h, v0.8h \n"
"fmla v31.8h, v15.8h, v0.8h \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1] \n" // r0_4567
"fmla v28.8h, v13.8h, v1.8h \n"
"fmla v29.8h, v14.8h, v1.8h \n"
"fmla v30.8h, v15.8h, v1.8h \n"
"fmla v31.8h, v16.8h, v1.8h \n"
"fmla v28.8h, v14.8h, v2.8h \n"
"fmla v29.8h, v15.8h, v2.8h \n"
"fmla v30.8h, v16.8h, v2.8h \n"
"fmla v31.8h, v17.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w04 w1_012
"fmla v28.8h, v15.8h, v3.8h \n"
"fmla v29.8h, v16.8h, v3.8h \n"
"fmla v30.8h, v17.8h, v3.8h \n"
"fmla v31.8h, v18.8h, v3.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n" // r1_0123
"fmla v28.8h, v16.8h, v4.8h \n"
"fmla v29.8h, v17.8h, v4.8h \n"
"fmla v30.8h, v18.8h, v4.8h \n"
"fmla v31.8h, v19.8h, v4.8h \n"
"fmla v28.8h, v20.8h, v5.8h \n"
"fmla v29.8h, v21.8h, v5.8h \n"
"fmla v30.8h, v22.8h, v5.8h \n"
"fmla v31.8h, v23.8h, v5.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%2] \n" // r1_4567
"fmla v28.8h, v21.8h, v6.8h \n"
"fmla v29.8h, v22.8h, v6.8h \n"
"fmla v30.8h, v23.8h, v6.8h \n"
"fmla v31.8h, v24.8h, v6.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w1_34 w2_01
"fmla v28.8h, v22.8h, v7.8h \n"
"fmla v29.8h, v23.8h, v7.8h \n"
"fmla v30.8h, v24.8h, v7.8h \n"
"fmla v31.8h, v25.8h, v7.8h \n"
"fmla v28.8h, v23.8h, v0.8h \n"
"fmla v29.8h, v24.8h, v0.8h \n"
"fmla v30.8h, v25.8h, v0.8h \n"
"fmla v31.8h, v26.8h, v0.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r2_0123
"fmla v28.8h, v24.8h, v1.8h \n"
"fmla v29.8h, v25.8h, v1.8h \n"
"fmla v30.8h, v26.8h, v1.8h \n"
"fmla v31.8h, v27.8h, v1.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3] \n" // r2_4567
"fmla v28.8h, v12.8h, v2.8h \n"
"fmla v29.8h, v13.8h, v2.8h \n"
"fmla v30.8h, v14.8h, v2.8h \n"
"fmla v31.8h, v15.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w2_234 w30
"fmla v28.8h, v13.8h, v3.8h \n"
"fmla v29.8h, v14.8h, v3.8h \n"
"fmla v30.8h, v15.8h, v3.8h \n"
"fmla v31.8h, v16.8h, v3.8h \n"
"fmla v28.8h, v14.8h, v4.8h \n"
"fmla v29.8h, v15.8h, v4.8h \n"
"fmla v30.8h, v16.8h, v4.8h \n"
"fmla v31.8h, v17.8h, v4.8h \n"
"fmla v28.8h, v15.8h, v5.8h \n"
"fmla v29.8h, v16.8h, v5.8h \n"
"fmla v30.8h, v17.8h, v5.8h \n"
"fmla v31.8h, v18.8h, v5.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" // r3_0123
"fmla v28.8h, v16.8h, v6.8h \n"
"fmla v29.8h, v17.8h, v6.8h \n"
"fmla v30.8h, v18.8h, v6.8h \n"
"fmla v31.8h, v19.8h, v6.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w3_1234
"fmla v28.8h, v20.8h, v7.8h \n"
"fmla v29.8h, v21.8h, v7.8h \n"
"fmla v30.8h, v22.8h, v7.8h \n"
"fmla v31.8h, v23.8h, v7.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%4] \n" // r3_4567
"fmla v28.8h, v21.8h, v0.8h \n"
"fmla v29.8h, v22.8h, v0.8h \n"
"fmla v30.8h, v23.8h, v0.8h \n"
"fmla v31.8h, v24.8h, v0.8h \n"
"fmla v28.8h, v22.8h, v1.8h \n"
"fmla v29.8h, v23.8h, v1.8h \n"
"fmla v30.8h, v24.8h, v1.8h \n"
"fmla v31.8h, v25.8h, v1.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%5], #64 \n" // r4_0123
"fmla v28.8h, v23.8h, v2.8h \n"
"fmla v29.8h, v24.8h, v2.8h \n"
"fmla v30.8h, v25.8h, v2.8h \n"
"fmla v31.8h, v26.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w4_0123
"fmla v28.8h, v24.8h, v3.8h \n"
"fmla v29.8h, v25.8h, v3.8h \n"
"fmla v30.8h, v26.8h, v3.8h \n"
"fmla v31.8h, v27.8h, v3.8h \n"
"fmla v28.8h, v12.8h, v4.8h \n"
"fmla v29.8h, v13.8h, v4.8h \n"
"fmla v30.8h, v14.8h, v4.8h \n"
"fmla v31.8h, v15.8h, v4.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%5] \n" // r4_4567
"fmla v28.8h, v13.8h, v5.8h \n"
"fmla v29.8h, v14.8h, v5.8h \n"
"fmla v30.8h, v15.8h, v5.8h \n"
"fmla v31.8h, v16.8h, v5.8h \n"
"fmla v28.8h, v14.8h, v6.8h \n"
"fmla v29.8h, v15.8h, v6.8h \n"
"fmla v30.8h, v16.8h, v6.8h \n"
"fmla v31.8h, v17.8h, v6.8h \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.8h}, [%6] \n" // w44
"fmla v28.8h, v15.8h, v7.8h \n"
"fmla v29.8h, v16.8h, v7.8h \n"
"fmla v30.8h, v17.8h, v7.8h \n"
"fmla v31.8h, v18.8h, v7.8h \n"
"fmla v28.8h, v16.8h, v0.8h \n"
"fmla v29.8h, v17.8h, v0.8h \n"
"fmla v30.8h, v18.8h, v0.8h \n"
"fmla v31.8h, v19.8h, v0.8h \n"
"sub %6, %6, #384 \n" // k0 -= 24 * 8
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(k0) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(k0),
"w"(_bias0) // %14
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.8h, v17.8h}, [%1], #32 \n" // r0_01
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w0_0123
"mov v30.16b, %14.16b \n" // sum00
"mov v31.16b, %14.16b \n" // sum01
"fmla v30.8h, v16.8h, v0.8h \n"
"fmla v31.8h, v17.8h, v0.8h \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%1] \n" // r0_2345
"fmla v30.8h, v17.8h, v1.8h \n"
"fmla v31.8h, v18.8h, v1.8h \n"
"fmla v30.8h, v18.8h, v2.8h \n"
"fmla v31.8h, v19.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w04 w1_012
"fmla v30.8h, v19.8h, v3.8h \n"
"fmla v31.8h, v20.8h, v3.8h \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v22.8h, v23.8h}, [%2], #32 \n" // r1_01
"fmla v30.8h, v20.8h, v4.8h \n"
"fmla v31.8h, v21.8h, v4.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%2] \n" // r1_2345
"fmla v30.8h, v22.8h, v5.8h \n"
"fmla v31.8h, v23.8h, v5.8h \n"
"fmla v30.8h, v23.8h, v6.8h \n"
"fmla v31.8h, v24.8h, v6.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w1_34 w2_01
"fmla v30.8h, v24.8h, v7.8h \n"
"fmla v31.8h, v25.8h, v7.8h \n"
"fmla v30.8h, v25.8h, v0.8h \n"
"fmla v31.8h, v26.8h, v0.8h \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.8h, v17.8h}, [%3], #32 \n" // r2_01
"fmla v30.8h, v26.8h, v1.8h \n"
"fmla v31.8h, v27.8h, v1.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%3] \n" // r2_2345
"fmla v30.8h, v16.8h, v2.8h \n"
"fmla v31.8h, v17.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w2_234 w30
"fmla v30.8h, v17.8h, v3.8h \n"
"fmla v31.8h, v18.8h, v3.8h \n"
"fmla v30.8h, v18.8h, v4.8h \n"
"fmla v31.8h, v19.8h, v4.8h \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v22.8h, v23.8h}, [%4], #32 \n" // r3_01
"fmla v30.8h, v19.8h, v5.8h \n"
"fmla v31.8h, v20.8h, v5.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w3_1234
"fmla v30.8h, v20.8h, v6.8h \n"
"fmla v31.8h, v21.8h, v6.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%4] \n" // r3_2345
"fmla v30.8h, v22.8h, v7.8h \n"
"fmla v31.8h, v23.8h, v7.8h \n"
"fmla v30.8h, v23.8h, v0.8h \n"
"fmla v31.8h, v24.8h, v0.8h \n"
"fmla v30.8h, v24.8h, v1.8h \n"
"fmla v31.8h, v25.8h, v1.8h \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.8h, v17.8h}, [%5], #32 \n" // r4_01
"fmla v30.8h, v25.8h, v2.8h \n"
"fmla v31.8h, v26.8h, v2.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w4_0123
"fmla v30.8h, v26.8h, v3.8h \n"
"fmla v31.8h, v27.8h, v3.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%5] \n" // r4_2345
"fmla v30.8h, v16.8h, v4.8h \n"
"fmla v31.8h, v17.8h, v4.8h \n"
"fmla v30.8h, v17.8h, v5.8h \n"
"fmla v31.8h, v18.8h, v5.8h \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.8h}, [%6] \n" // w44
"fmla v30.8h, v18.8h, v6.8h \n"
"fmla v31.8h, v19.8h, v6.8h \n"
"fmla v30.8h, v19.8h, v7.8h \n"
"fmla v31.8h, v20.8h, v7.8h \n"
"fmla v30.8h, v20.8h, v0.8h \n"
"fmla v31.8h, v21.8h, v0.8h \n"
"sub %6, %6, #384 \n" // k0 -= 24 * 8
"st1 {v30.8h, v31.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(k0) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(k0),
"w"(_bias0) // %14
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.8h}, [%1], #16 \n" // r0_0
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w0_0123
"mov v30.16b, %14.16b \n" // sum00
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%1] \n" // r0_1234
"fmla v30.8h, v16.8h, v0.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w04 w1_012
"fmla v30.8h, v17.8h, v1.8h \n"
"fmla v30.8h, v18.8h, v2.8h \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v21.8h}, [%2], #16 \n" // r1_0
"fmla v30.8h, v19.8h, v3.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%2] \n" // r1_1234
"fmla v30.8h, v20.8h, v4.8h \n"
"fmla v30.8h, v21.8h, v5.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w1_34 w2_01
"fmla v30.8h, v22.8h, v6.8h \n"
"fmla v30.8h, v23.8h, v7.8h \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.8h}, [%3], #16 \n" // r2_0
"fmla v30.8h, v24.8h, v0.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%3] \n" // r2_1234
"fmla v30.8h, v25.8h, v1.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w2_234 w30
"fmla v30.8h, v16.8h, v2.8h \n"
"fmla v30.8h, v17.8h, v3.8h \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v21.8h}, [%4], #16 \n" // r3_0
"fmla v30.8h, v18.8h, v4.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%4] \n" // r3_1234
"fmla v30.8h, v19.8h, v5.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w3_1234
"fmla v30.8h, v20.8h, v6.8h \n"
"fmla v30.8h, v21.8h, v7.8h \n"
"fmla v30.8h, v22.8h, v0.8h \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.8h}, [%5], #16 \n" // r4_0
"fmla v30.8h, v23.8h, v1.8h \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w4_0123
"fmla v30.8h, v24.8h, v2.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%5] \n" // r4_1234
"fmla v30.8h, v25.8h, v3.8h \n"
"fmla v30.8h, v16.8h, v4.8h \n"
"fmla v30.8h, v17.8h, v5.8h \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v0.8h}, [%6] \n" // w44
"fmla v30.8h, v18.8h, v6.8h \n"
"fmla v30.8h, v19.8h, v7.8h \n"
"fmla v30.8h, v20.8h, v0.8h \n"
"sub %6, %6, #384 \n" // k0 -= 24 * 8
"st1 {v30.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(k0) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(k0),
"w"(_bias0) // %14
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v30");
}
r0 += 4 * 8;
r1 += 4 * 8;
r2 += 4 * 8;
r3 += 4 * 8;
r4 += 4 * 8;
}
}
}
static void convdw5x5s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 8;
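// stride 2: the inner loop reads 2 * outw of the w pixels in a row and each
// output row spans two input rows, so tailstep skips the unread tail of the
// current row plus one full row, times the 8 packed fp16 lanes.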
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
const __fp16* k0 = kernel.row<const __fp16>(g);
__fp16* outptr0 = out.row<__fp16>(0);
const Mat img0 = bottom_blob.channel(g);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
const __fp16* r4 = img0.row<const __fp16>(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
float16x8_t _sum0 = _bias0;
float16x8_t _r00 = vld1q_f16(r0);
float16x8_t _r01 = vld1q_f16(r0 + 8);
float16x8_t _r02 = vld1q_f16(r0 + 16);
float16x8_t _r03 = vld1q_f16(r0 + 24);
float16x8_t _r04 = vld1q_f16(r0 + 32);
float16x8_t _k00 = vld1q_f16(k0);
float16x8_t _k01 = vld1q_f16(k0 + 8);
float16x8_t _k02 = vld1q_f16(k0 + 16);
float16x8_t _k03 = vld1q_f16(k0 + 24);
float16x8_t _k04 = vld1q_f16(k0 + 32);
k0 += 40;
_sum0 = vfmaq_f16(_sum0, _k00, _r00);
_sum0 = vfmaq_f16(_sum0, _k01, _r01);
_sum0 = vfmaq_f16(_sum0, _k02, _r02);
_sum0 = vfmaq_f16(_sum0, _k03, _r03);
_sum0 = vfmaq_f16(_sum0, _k04, _r04);
float16x8_t _r10 = vld1q_f16(r1);
float16x8_t _r11 = vld1q_f16(r1 + 8);
float16x8_t _r12 = vld1q_f16(r1 + 16);
float16x8_t _r13 = vld1q_f16(r1 + 24);
float16x8_t _r14 = vld1q_f16(r1 + 32);
float16x8_t _k10 = vld1q_f16(k0);
float16x8_t _k11 = vld1q_f16(k0 + 8);
float16x8_t _k12 = vld1q_f16(k0 + 16);
float16x8_t _k13 = vld1q_f16(k0 + 24);
float16x8_t _k14 = vld1q_f16(k0 + 32);
k0 += 40;
_sum0 = vfmaq_f16(_sum0, _k10, _r10);
_sum0 = vfmaq_f16(_sum0, _k11, _r11);
_sum0 = vfmaq_f16(_sum0, _k12, _r12);
_sum0 = vfmaq_f16(_sum0, _k13, _r13);
_sum0 = vfmaq_f16(_sum0, _k14, _r14);
float16x8_t _r20 = vld1q_f16(r2);
float16x8_t _r21 = vld1q_f16(r2 + 8);
float16x8_t _r22 = vld1q_f16(r2 + 16);
float16x8_t _r23 = vld1q_f16(r2 + 24);
float16x8_t _r24 = vld1q_f16(r2 + 32);
float16x8_t _k20 = vld1q_f16(k0);
float16x8_t _k21 = vld1q_f16(k0 + 8);
float16x8_t _k22 = vld1q_f16(k0 + 16);
float16x8_t _k23 = vld1q_f16(k0 + 24);
float16x8_t _k24 = vld1q_f16(k0 + 32);
k0 += 40;
_sum0 = vfmaq_f16(_sum0, _k20, _r20);
_sum0 = vfmaq_f16(_sum0, _k21, _r21);
_sum0 = vfmaq_f16(_sum0, _k22, _r22);
_sum0 = vfmaq_f16(_sum0, _k23, _r23);
_sum0 = vfmaq_f16(_sum0, _k24, _r24);
float16x8_t _r30 = vld1q_f16(r3);
float16x8_t _r31 = vld1q_f16(r3 + 8);
float16x8_t _r32 = vld1q_f16(r3 + 16);
float16x8_t _r33 = vld1q_f16(r3 + 24);
float16x8_t _r34 = vld1q_f16(r3 + 32);
float16x8_t _k30 = vld1q_f16(k0);
float16x8_t _k31 = vld1q_f16(k0 + 8);
float16x8_t _k32 = vld1q_f16(k0 + 16);
float16x8_t _k33 = vld1q_f16(k0 + 24);
float16x8_t _k34 = vld1q_f16(k0 + 32);
k0 += 40;
_sum0 = vfmaq_f16(_sum0, _k30, _r30);
_sum0 = vfmaq_f16(_sum0, _k31, _r31);
_sum0 = vfmaq_f16(_sum0, _k32, _r32);
_sum0 = vfmaq_f16(_sum0, _k33, _r33);
_sum0 = vfmaq_f16(_sum0, _k34, _r34);
float16x8_t _r40 = vld1q_f16(r4);
float16x8_t _r41 = vld1q_f16(r4 + 8);
float16x8_t _r42 = vld1q_f16(r4 + 16);
float16x8_t _r43 = vld1q_f16(r4 + 24);
float16x8_t _r44 = vld1q_f16(r4 + 32);
float16x8_t _k40 = vld1q_f16(k0);
float16x8_t _k41 = vld1q_f16(k0 + 8);
float16x8_t _k42 = vld1q_f16(k0 + 16);
float16x8_t _k43 = vld1q_f16(k0 + 24);
float16x8_t _k44 = vld1q_f16(k0 + 32);
k0 -= 160;
_sum0 = vfmaq_f16(_sum0, _k40, _r40);
_sum0 = vfmaq_f16(_sum0, _k41, _r41);
_sum0 = vfmaq_f16(_sum0, _k42, _r42);
_sum0 = vfmaq_f16(_sum0, _k43, _r43);
_sum0 = vfmaq_f16(_sum0, _k44, _r44);
vst1q_f16(outptr0, _sum0);
outptr0 += 8;
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
wino_conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "wino_conv_kernel_x86.h"
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))
static void relu(float* data, int size, int activation)
{
for (int i = 0; i < size; i++)
{
data[i] = WINO_MAX(data[i], ( float )0);
if (activation > 0)
{
data[i] = WINO_MIN(data[i], ( float )activation);
}
}
}
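// Usage note (illustrative): activation <= 0 gives plain ReLU; activation == 6
// clamps the result to [0, 6], i.e. ReLU6.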
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
int output_c = filter->dims[0];
int input_c = filter->dims[1];
int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float);
return trans_ker_size + 128; // caution
}
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
int i;
if (n >= n_align && m >= m_align)
{
memcpy(dst, src, m * n * sizeof(float));
return;
}
for (i = 0; i < m; ++i)
{
memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float));
}
}
// pad 0 in right and down side on 3D
void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
int i;
if (n >= n_align && m >= m_align)
{
memcpy(dst, src, c * m * n * sizeof(float));
return;
}
for (i = 0; i < c; ++i)
{
pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
}
}
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
int i;
if (n >= n_align && m >= m_align)
{
memcpy(dst, src, m * n * sizeof(float));
return;
}
for (i = 0; i < m; ++i)
{
memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float));
}
}
// pad 0 in right and down side on 3D
void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
int i;
if (n >= n_align && m >= m_align)
{
memcpy(dst, src, c * m * n * sizeof(float));
return;
}
for (i = 0; i < c; ++i)
{
delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
}
}
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block,
float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch,
int outw, int outh, int outch, int num_thread)
{
size_t elemsize = sizeof(float);
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
float* bottom_blob_bordered = bottom_blob;
int outw_align = (outw + 3) / 4 * 4;
int outh_align = (outh + 3) / 4 * 4;
w = outw_align + 2;
h = outh_align + 2;
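// e.g. (illustrative) outw = outh = 13: outw_align = outh_align = 16, so the
// padded input is 18 x 18 and is covered by 4 x 4 input tiles of 6 x 6 that
// overlap their neighbours by 2 pixels (F(4,3) yields 4 outputs per tile side).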
// BEGIN transform input
float* bottom_blob_tm = NULL;
{
int w_tm = outw_align / 4 * 6;
int h_tm = outh_align / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
const int tiles_n = 4 * inch * tiles;
bottom_blob_tm = transform_input;
// BT
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
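// quick arithmetic check of the rows above (illustrative): with r00..r05 all
// equal to 1 the six outputs are (0, -6, 0, 0, 0, 0); e.g. row 1 gives
// -4 * (1 + 1) + 1 + 1 = -6, while every other row sums to 0.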
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered + q * w * h;
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q;
float* out_tm1 = out_tm0 + tiles_n;
float* out_tm2 = out_tm0 + 2 * tiles_n;
float* out_tm3 = out_tm0 + 3 * tiles_n;
float* out_tm4 = out_tm0 + 4 * tiles_n;
float* out_tm5 = out_tm0 + 5 * tiles_n;
float* out_tm6 = out_tm0 + 6 * tiles_n;
float* out_tm7 = out_tm0 + 7 * tiles_n;
float* out_tm8 = out_tm0 + 8 * tiles_n;
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#ifdef _WIN32
{
_t0.m256_f32[0] = _w0.m256_f32[0];
_t1.m256_f32[0] = _w0.m256_f32[1];
_t2.m256_f32[0] = _w0.m256_f32[2];
_t3.m256_f32[0] = _w0.m256_f32[3];
_t4.m256_f32[0] = _w0.m256_f32[4];
_t5.m256_f32[0] = _w0.m256_f32[5];
_t0.m256_f32[1] = _w1.m256_f32[0];
_t1.m256_f32[1] = _w1.m256_f32[1];
_t2.m256_f32[1] = _w1.m256_f32[2];
_t3.m256_f32[1] = _w1.m256_f32[3];
_t4.m256_f32[1] = _w1.m256_f32[4];
_t5.m256_f32[1] = _w1.m256_f32[5];
_t0.m256_f32[2] = _w2.m256_f32[0];
_t1.m256_f32[2] = _w2.m256_f32[1];
_t2.m256_f32[2] = _w2.m256_f32[2];
_t3.m256_f32[2] = _w2.m256_f32[3];
_t4.m256_f32[2] = _w2.m256_f32[4];
_t5.m256_f32[2] = _w2.m256_f32[5];
_t0.m256_f32[3] = _w3.m256_f32[0];
_t1.m256_f32[3] = _w3.m256_f32[1];
_t2.m256_f32[3] = _w3.m256_f32[2];
_t3.m256_f32[3] = _w3.m256_f32[3];
_t4.m256_f32[3] = _w3.m256_f32[4];
_t5.m256_f32[3] = _w3.m256_f32[5];
_t0.m256_f32[4] = _w4.m256_f32[0];
_t1.m256_f32[4] = _w4.m256_f32[1];
_t2.m256_f32[4] = _w4.m256_f32[2];
_t3.m256_f32[4] = _w4.m256_f32[3];
_t4.m256_f32[4] = _w4.m256_f32[4];
_t5.m256_f32[4] = _w4.m256_f32[5];
_t0.m256_f32[5] = _w5.m256_f32[0];
_t1.m256_f32[5] = _w5.m256_f32[1];
_t2.m256_f32[5] = _w5.m256_f32[2];
_t3.m256_f32[5] = _w5.m256_f32[3];
_t4.m256_f32[5] = _w5.m256_f32[4];
_t5.m256_f32[5] = _w5.m256_f32[5];
}
#else
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
#endif
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};
_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};
_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};
_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};
_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};
_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};
_mm256_storeu_ps(output_n5, _n5);
out_tm0[0] = output_n0[0];
out_tm0[1] = output_n0[1];
out_tm0[2] = output_n0[2];
out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4];
out_tm1[1] = output_n0[5];
out_tm1[2] = output_n1[0];
out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2];
out_tm2[1] = output_n1[3];
out_tm2[2] = output_n1[4];
out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0];
out_tm3[1] = output_n2[1];
out_tm3[2] = output_n2[2];
out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4];
out_tm4[1] = output_n2[5];
out_tm4[2] = output_n3[0];
out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2];
out_tm5[1] = output_n3[3];
out_tm5[2] = output_n3[4];
out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0];
out_tm6[1] = output_n4[1];
out_tm6[2] = output_n4[2];
out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4];
out_tm7[1] = output_n4[5];
out_tm7[2] = output_n5[0];
out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2];
out_tm8[1] = output_n5[3];
out_tm8[2] = output_n5[4];
out_tm8[3] = output_n5[5];
#else
float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
// BEGIN dot
float* top_blob_tm = NULL;
{
int w_tm = outw_align / 4 * 6;
int h_tm = outh_align / 4 * 6;
int nColBlocks = h_tm / 6; // number of 6x6 tile blocks along the height (cf. the blocking in FeatherCNN)
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
const int tiles_n = 36 * tiles;
top_blob_tm = dot_block;
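// Dot-product stage layout note: each tile carries 36 transformed values that are
// processed in 9 groups of 4 consecutive floats, so r indexes the group and every
// pointer below is offset by r * 4. Output channels are blocked by 8, then 4, then 1,
// which matches the interleaved kernel layout produced by
// conv3x3s1_winograd43_transform_kernel_sse (32 / 16 / 4 floats per input-channel step).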
#pragma omp parallel for num_threads(num_thread)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp << 3;
float* output0_tm = top_blob_tm + tiles_n * p;
float* output1_tm = top_blob_tm + tiles_n * (p + 1);
float* output2_tm = top_blob_tm + tiles_n * (p + 2);
float* output3_tm = top_blob_tm + tiles_n * (p + 3);
float* output4_tm = top_blob_tm + tiles_n * (p + 4);
float* output5_tm = top_blob_tm + tiles_n * (p + 5);
float* output6_tm = top_blob_tm + tiles_n * (p + 6);
float* output7_tm = top_blob_tm + tiles_n * (p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32;
const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0 + 4);
__m128 _r2 = _mm_loadu_ps(r0 + 8);
__m128 _r3 = _mm_loadu_ps(r0 + 12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
sum4[n] += r0[n] * kptr[n + 16];
sum5[n] += r0[n] * kptr[n + 20];
sum6[n] += r0[n] * kptr[n + 24];
sum7[n] += r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm + tiles_n * p;
float* output1_tm = top_blob_tm + tiles_n * (p + 1);
float* output2_tm = top_blob_tm + tiles_n * (p + 2);
float* output3_tm = top_blob_tm + tiles_n * (p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16;
const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm + 36 * tiles * p;
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr =
kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 4;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
}
}
// END dot
// BEGIN transform output
float* top_blob_bordered = NULL;
if (outw_align == outw && outh_align == outh)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered = output_bordered;
}
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw_align / 4 * 6;
int h_tm = outh_align / 4 * 6;
int nColBlocks = h_tm / 6; // number of 6x6 tile blocks along the height (cf. the blocking in FeatherCNN)
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
float* out_tile = top_blob_tm + 36 * tiles * p;
float* outRow0 = top_blob_bordered + outw_align * outh_align * p;
float* outRow1 = outRow0 + outw_align;
float* outRow2 = outRow0 + outw_align * 2;
float* outRow3 = outRow0 + outw_align * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
// TODO AVX2
float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
float w0[6], w1[6], w2[6], w3[6];
float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
float o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw_align * 3;
outRow1 += outw_align * 3;
outRow2 += outw_align * 3;
outRow3 += outw_align * 3;
}
}
}
// END transform output
if (outw_align != outw || outh_align != outh)
{
delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0);
}
}
void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch)
{
float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float));
// G
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36;
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3] = {0};
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
float* kernel_tm_test = kernel_wino;
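// Interleave the transformed kernels for the dot stage: for each group r of 4 Winograd
// coefficients (36 = 9 * 4), output channels are packed in blocks of 8, then 4, then 1,
// so that one input-channel step of a block is contiguous (32, 16 or 4 floats). These
// offsets mirror the kptr arithmetic used by the dot product in conv3x3s1_winograd43_sse.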
for (int r = 0; r < 9; r++)
{
int p = 0;
for (; p + 7 < outch; p += 8)
{
const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36;
const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36;
const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36;
const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36;
float* ktmp = kernel_tm_test + p / 8 * inch * 32;
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
for (; p + 3 < outch; p += 4)
{
const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16;
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
for (; p < outch; p++)
{
const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm_test += 4 * inch * outch;
}
sys_free(kernel_tm); // allocated with sys_malloc above
}
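/* Illustrative scalar sketch (not called by the code above): the same per-kernel
   Winograd F(4x4, 3x3) transform as the inner loops of
   conv3x3s1_winograd43_transform_kernel_sse, written out for a single 3x3 kernel.
   The function name and the caller-provided 36-float output buffer are assumptions
   made for this example only. */
static void wino43_transform_one_kernel_ref(const float* k3x3, float* u36)
{
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}};
    const float* k0 = k3x3;     // first row of the 3x3 kernel
    const float* k1 = k3x3 + 3; // second row
    const float* k2 = k3x3 + 6; // third row
    float tmp[6][3];
    // tmp[i][c] = row c of the kernel dotted with row i of G (6x3 intermediate)
    for (int i = 0; i < 6; i++)
    {
        tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
        tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
        tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
    }
    // u36[j * 6 + i] = row j of tmp dotted with row i of G, giving the 6x6
    // transformed kernel stored row-major, exactly as kernel_tm is laid out above.
    for (int j = 0; j < 6; j++)
    {
        for (int i = 0; i < 6; i++)
        {
            u36[j * 6 + i] = tmp[j][0] * ktm[i][0] + tmp[j][1] * ktm[i][1] + tmp[j][2] * ktm[i][2];
        }
    }
}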
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
int batch = input_tensor->dims[0];
int input_c = input_tensor->dims[1];
int input_h = input_tensor->dims[2];
int input_w = input_tensor->dims[3];
int output_c = output_tensor->dims[1];
int output_h = output_tensor->dims[2];
int output_w = output_tensor->dims[3];
int pad_h = param->pad_h0;
int pad_w = param->pad_w0;
float* kernel = ( float* )filter_tensor->data;
if (!priv_info->external_interleave_mem)
{
int mem_size = get_private_mem_size(filter_tensor, param);
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
int block_h = (output_h + TILE - 1) / TILE;
int block_w = (output_w + TILE - 1) / TILE;
int block = block_h * block_w;
int padded_inh = TILE * block_h + 2;
int padded_inw = TILE * block_w + 2;
int pad_inhw = padded_inh * padded_inw;
int outw = block_w * TILE;
int outh = block_h * TILE;
priv_info->input_pad = ( float* )sys_malloc(input_c * pad_inhw * sizeof(float));
memset(priv_info->input_pad, 0, input_c * pad_inhw * sizeof(float));
priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float));
priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float));
priv_info->output_bordered = NULL;
if (outw != output_w || outh != output_h)
{
priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float));
}
conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c);
return 0;
}
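/* Buffers set up above: input_pad holds the zero-padded input
   (input_c * padded_inh * padded_inw floats), transform_input holds the
   Winograd-transformed input tiles (ELEM_SIZE * block * input_c floats),
   dot_block holds the per-tile dot-product results before the output transform
   (ELEM_SIZE * block * output_c floats), and output_bordered is only allocated
   when the 4x4-tile-aligned output size differs from the real output size. */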
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
{
sys_free(priv_info->interleave_buffer);
priv_info->interleave_buffer = NULL;
}
if (priv_info->input_pad)
{
sys_free(priv_info->input_pad);
priv_info->input_pad = NULL;
}
if (priv_info->dot_block)
{
sys_free(priv_info->dot_block);
priv_info->dot_block = NULL;
}
if (priv_info->transform_input)
{
sys_free(priv_info->transform_input);
priv_info->transform_input = NULL;
}
if (priv_info->output_bordered)
{
sys_free(priv_info->output_bordered);
priv_info->output_bordered = NULL;
}
return 0;
}
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
int num_thread, int cpu_affinity)
{
/* param */
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
int pad_h0 = param->pad_h0;
int pad_w0 = param->pad_w0;
int act_type = param->activation;
int group = param->group;
int batch = input_tensor->dims[0];
int in_c = input_tensor->dims[1];
int in_c_g = input_tensor->dims[1] / group;
int in_h = input_tensor->dims[2];
int in_w = input_tensor->dims[3];
int input_size = in_c * in_h * in_w;
int input_size_g = in_c_g * in_h * in_w;
int kernel_size = in_c * kernel_h * kernel_w;
int out_c = output_tensor->dims[1];
int out_h = output_tensor->dims[2];
int out_w = output_tensor->dims[3];
int out_hw = out_h * out_w;
int output_size = out_c * out_h * out_w;
int out_c_align = ((out_c + 3) & -4);
/* wino param */
int block_h = (out_h + TILE - 1) / TILE;
int block_w = (out_w + TILE - 1) / TILE;
int block_hw = block_h * block_w;
int padded_in_h = block_h * TILE + 2;
int padded_in_w = block_w * TILE + 2;
int padded_in_hw = padded_in_h * padded_in_w;
/* buffer addr */
float* input = ( float* )input_tensor->data;
float* output = ( float* )output_tensor->data;
float* biases = NULL;
if (bias_tensor != NULL)
biases = ( float* )bias_tensor->data;
pad_0_align_3D(priv_info->input_pad, input, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0);
for (int i = 0; i < batch; i++)
{
for (int g = 0; g < group; g++)
{
conv3x3s1_winograd43_sse(priv_info->input_pad + i * input_size + g * input_size_g, output,
priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input,
priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h,
out_c, num_thread);
}
}
if (act_type >= 0)
{
relu(output, batch * output_size, act_type);
}
return 0;
} |
GB_bitmap_add_template.c | //------------------------------------------------------------------------------
// GB_bitmap_add_template: C = A+B, C<M>=A+B, and C<!M>=A+B, C bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is bitmap. The mask M can have any sparsity structure, and is efficient
// to apply (all methods are asymptotically optimal). All cases (no M, M, !M)
// are handled. The values of A, B, and C are not accessed if C is iso,
// in which case GB_ISO_ADD is #defined by the #including file.
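// Method numbering below: Methods 21-23 handle the unmasked case (C=A+B),
// Methods 24-26 handle a sparse/hypersparse complemented mask (C<!M>=A+B, with
// M first scattered into the C bitmap), and Methods 27-29 handle a bitmap or
// full mask (C<M>=A+B or C<!M>=A+B via GB_GET_MIJ). Within each group the
// variants differ only in which of A and B is bitmap/full versus
// sparse/hypersparse.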
{
// TODO: the input C can be modified in-place, if it is also bitmap
int64_t cnvals = 0 ;
if (M == NULL)
{
//----------------------------------------------------------------------
// M is not present
//----------------------------------------------------------------------
// ------------------------------------------
// C = A + B
// ------------------------------------------
// bitmap . sparse bitmap
// bitmap . bitmap sparse
// bitmap . bitmap bitmap
ASSERT (A_is_bitmap || B_is_bitmap) ;
ASSERT (!A_is_full) ;
ASSERT (!B_is_full) ;
if (A_is_bitmap && B_is_bitmap)
{
//------------------------------------------------------------------
// Method21: C, A, and B are all bitmap
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
#ifdef GB_ISO_ADD
int8_t c = Ab [p] || Bb [p] ;
#else
int8_t c = 0 ;
if (Ab [p] && Bb [p])
{
// C (i,j) = A (i,j) + B (i,j)
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
c = 1 ;
}
else if (Bb [p])
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
c = 1 ;
}
else if (Ab [p])
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
c = 1 ;
}
#endif
Cb [p] = c ;
task_cnvals += c ;
}
cnvals += task_cnvals ;
}
}
else if (A_is_bitmap)
{
//------------------------------------------------------------------
// Method22: C and A are bitmap; B is sparse or hypersparse
//------------------------------------------------------------------
#ifdef GB_ISO_ADD
GB_memcpy (Cb, Ab, cnz, C_nthreads) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(C_nthreads) \
schedule(static)
for (p = 0 ; p < cnz ; p++)
{
int8_t a = Ab [p] ;
if (a)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
}
Cb [p] = a ;
}
#endif
cnvals = A->nvals ;
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
if (Cb [p])
{
// C (i,j) = A (i,j) + B (i,j)
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, p , A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
#endif
}
else
{
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, pB, B_iso) ;
}
#endif
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
else
{
//------------------------------------------------------------------
// Method23: C and B are bitmap; A is sparse or hypersparse
//------------------------------------------------------------------
#ifdef GB_ISO_ADD
GB_memcpy (Cb, Bb, cnz, C_nthreads) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(C_nthreads) \
schedule(static)
for (p = 0 ; p < cnz ; p++)
{
int8_t b = Bb [p] ;
if (b)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
}
Cb [p] = b ;
}
#endif
cnvals = B->nvals ;
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
if (Cb [p])
{
// C (i,j) = A (i,j) + B (i,j)
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, p , B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
#endif
}
else
{
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, pA, A_iso) ;
}
#endif
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
}
else if (M_is_sparse_or_hyper)
{
//----------------------------------------------------------------------
// C is bitmap, M is sparse or hyper and complemented
//----------------------------------------------------------------------
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// bitmap sparse sparse bitmap
// bitmap sparse sparse full
// bitmap sparse bitmap sparse
// bitmap sparse bitmap bitmap
// bitmap sparse bitmap full
// bitmap sparse full sparse
// bitmap sparse full bitmap
// bitmap sparse full full
// M is sparse and complemented. If M is sparse and not
// complemented, then C is constructed as sparse, not bitmap.
ASSERT (Mask_comp) ;
// C(i,j) = A(i,j) + B(i,j) can only be computed where M(i,j) is
// not present in the sparse pattern of M, or where it is present
// but equal to zero.
//----------------------------------------------------------------------
// scatter M into the C bitmap
//----------------------------------------------------------------------
GB_SLICE_MATRIX (M, 8, chunk) ;
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < M_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Mslice [taskid] ;
int64_t klast = klast_Mslice [taskid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of M(:,k) for this task
int64_t j = GBH (Mh, k) ;
int64_t pM_start, pM_end ;
GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst,
klast, pstart_Mslice, Mp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over M(:,j), the kth vector of M
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
// mark C(i,j) if M(i,j) is true
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
int64_t p = pC_start + i ;
Cb [p] = 2 ;
}
}
}
}
// C(i,j) has been marked, in Cb, with the value 2 where M(i,j)=1.
// These positions will not be computed in C(i,j). C(i,j) can only
// be modified where Cb [p] is zero.
//----------------------------------------------------------------------
// compute C<!M>=A+B using the mask scattered in C
//----------------------------------------------------------------------
bool M_cleared = false ;
if ((A_is_bitmap || A_is_full) && (B_is_bitmap || B_is_full))
{
//------------------------------------------------------------------
// Method24(!M,sparse): C is bitmap, both A and B are bitmap or full
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
int8_t c = Cb [p] ;
if (c == 0)
{
// M(i,j) is zero, so C(i,j) can be computed
int8_t a = GBB (Ab, p) ;
int8_t b = GBB (Bb, p) ;
#ifdef GB_ISO_ADD
c = a || b ;
#else
if (a && b)
{
// C (i,j) = A (i,j) + B (i,j)
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
c = 1 ;
}
else if (b)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
c = 1 ;
}
else if (a)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
c = 1 ;
}
#endif
Cb [p] = c ;
task_cnvals += c ;
}
else
{
// M(i,j) == 1, so C(i,j) is not computed
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
M_cleared = true ; // M has also been cleared from C
}
else if (A_is_bitmap || A_is_full)
{
//------------------------------------------------------------------
// Method25(!M,sparse): C bitmap, A bitmap or full, B sparse/hyper
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
if (Cb [p] == 0)
{
int8_t a = GBB (Ab, p) ;
#ifndef GB_ISO_ADD
if (a)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
}
#endif
Cb [p] = a ;
task_cnvals += a ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, p , A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
#endif
}
else if (c == 0)
{
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, pB, B_iso) ;
}
#endif
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
else
{
//------------------------------------------------------------------
// Method26: C bitmap, A sparse or hypersparse, B bitmap or full
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
if (Cb [p] == 0)
{
int8_t b = GBB (Bb, p) ;
#ifndef GB_ISO_ADD
if (b)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
}
#endif
Cb [p] = b ;
task_cnvals += b ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, p , B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
#endif
}
else if (c == 0)
{
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, pA, A_iso) ;
}
#endif
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
//---------------------------------------------------------------------
// clear M from C
//---------------------------------------------------------------------
if (!M_cleared)
{
// This step is required if either A or B are sparse/hyper (if
// one is sparse/hyper, the other must be bitmap). It requires
// an extra pass over the mask M, so this might be slower than
// postponing the application of the mask, and doing it later.
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < M_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Mslice [taskid] ;
int64_t klast = klast_Mslice [taskid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of M(:,k) for this task
int64_t j = GBH (Mh, k) ;
int64_t pM_start, pM_end ;
GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst,
klast, pstart_Mslice, Mp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over M(:,j), the kth vector of M
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
// mark C(i,j) if M(i,j) is true
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
int64_t p = pC_start + i ;
Cb [p] = 0 ;
}
}
}
}
}
}
else
{
//----------------------------------------------------------------------
// C is bitmap; M is bitmap or full
//----------------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// bitmap bitmap sparse bitmap
// bitmap bitmap sparse full
// bitmap bitmap bitmap sparse
// bitmap bitmap bitmap bitmap
// bitmap bitmap bitmap full
// bitmap bitmap full sparse
// bitmap bitmap full bitmap
// bitmap bitmap full full
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// bitmap full sparse bitmap
// bitmap full sparse full
// bitmap full bitmap sparse
// bitmap full bitmap bitmap
// bitmap full bitmap full
// bitmap full full sparse
// bitmap full full bitmap
// bitmap full full full
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// bitmap bitmap sparse sparse
// bitmap bitmap sparse bitmap
// bitmap bitmap sparse full
// bitmap bitmap bitmap sparse
// bitmap bitmap bitmap bitmap
// bitmap bitmap bitmap full
// bitmap bitmap full sparse
// bitmap bitmap full bitmap
// bitmap bitmap full full
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// bitmap full sparse sparse
// bitmap full sparse bitmap
// bitmap full sparse full
// bitmap full bitmap sparse
// bitmap full bitmap bitmap
// bitmap full bitmap full
// bitmap full full sparse
// bitmap full full bitmap
// bitmap full full full
ASSERT (M_is_bitmap || M_is_full) ;
ASSERT (A_is_bitmap || A_is_full || B_is_bitmap || B_is_full) ;
#undef GB_GET_MIJ
#define GB_GET_MIJ(p) \
bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ; \
if (Mask_comp) mij = !mij ;
if ((A_is_bitmap || A_is_full) && (B_is_bitmap || B_is_full))
{
//------------------------------------------------------------------
// Method27: C is bitmap; M, A, and B are bitmap or full
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
// M(i,j) is true, so C(i,j) can be computed
int8_t a = GBB (Ab, p) ;
int8_t b = GBB (Bb, p) ;
#ifdef GB_ISO_ADD
int8_t c = a || b ;
#else
int8_t c = 0 ;
if (a && b)
{
// C (i,j) = A (i,j) + B (i,j)
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
c = 1 ;
}
else if (b)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
c = 1 ;
}
else if (a)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
c = 1 ;
}
#endif
Cb [p] = c ;
task_cnvals += c ;
}
else
{
// effective mask is false, so C(i,j) is not computed
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
}
else if (A_is_bitmap || A_is_full)
{
//------------------------------------------------------------------
// Method28: C bitmap; M and A bitmap or full; B sparse or hyper
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
int8_t a = GBB (Ab, p) ;
#ifndef GB_ISO_ADD
if (a)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, p, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
}
#endif
}
#endif
Cb [p] = a ;
task_cnvals += a ;
}
else
{
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
GB_GET_MIJ (p) ;
if (mij)
{
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, p , A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
#endif
}
else
{
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, pB, B_iso) ;
}
#endif
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
}
cnvals += task_cnvals ;
}
}
else
{
//------------------------------------------------------------------
// Method29: C bitmap; M and B bitmap or full; A sparse or hyper
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
int8_t b = GBB (Bb, p) ;
#ifndef GB_ISO_ADD
if (b)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, p, B_iso) ;
GB_BINOP (GB_CX (p), alpha_scalar, bij,
p % vlen, p / vlen) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
}
#endif
}
#endif
Cb [p] = b ;
task_cnvals += b ;
}
else
{
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
GB_GET_MIJ (p) ;
if (mij)
{
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, p , B_iso) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
#endif
}
else
{
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (p), aij, beta_scalar,
i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, pA, A_iso) ;
}
#endif
#endif
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
}
cnvals += task_cnvals ;
}
}
}
C->nvals = cnvals ;
}
|
Image.h | #pragma once
#include<vector>
#include<fstream>
#include<iostream>
#include<functional>
#include<cmath>
#include<random>
#include "Vec3.h"
#include "PNG.h"
using namespace std;
// Returns the squared Euclidean distance between the two points (x, y) lifted onto the unit sphere
float projectedDistance(float x1, float y1, float x2, float y2) {
return 2 - 2 * x1 * x2 - 2 * y1 * y2 - 2 * sqrt((1 - x1 * x1 - y1 * y1) * (1 - x2 * x2 - y2 * y2));
}
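// Derivation: each (x, y) is lifted to the unit sphere as p = (x, y, z) with
// z = sqrt(1 - x^2 - y^2), and the function returns the squared distance
// |p1 - p2|^2 = 2 - 2*(x1*x2 + y1*y2 + z1*z2).
// Worked check: (0, 0) lifts to (0, 0, 1) and (0.6, 0) lifts to (0.6, 0, 0.8);
// the formula gives 2 - 0 - 0 - 2*0.8 = 0.4, which matches
// (0.6)^2 + 0^2 + (-0.2)^2 = 0.4.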
float projectedDistance(Vec3f v1, Vec3f v2) {
return projectedDistance(v1[0], v1[1], v2[0], v2[1]);
}
Vec3f nearestSeed(Vec3f q, const vector<vector<Vec3f>> &seeds) {
Vec3f nearest = {0.5, 0.5, 0};
if (seeds.size() == 0 || seeds[0].size() == 0)
return nearest;
for (const vector<Vec3f> &layer: seeds) {
for (Vec3f seed: layer) {
if ((q - seed).length() < (q - nearest).length()) {
nearest = seed;
}
}
}
return nearest;
}
// TODO: better search, kd-tree or something
vector<Vec3f> nearestSeeds(Vec3f q, const vector<vector<Vec3f>> &seeds, function<bool(Vec3f)> test) {
vector<Vec3f> nearests;
if (seeds.size() == 0 || seeds[0].size() == 0)
return nearests;
for (const vector<Vec3f> &layer: seeds) {
Vec3f nearest = {0.5, 0.5, 0};
for (Vec3f seed: layer) {
if (test(seed) && (q - seed).length() < (q - nearest).length()) {
nearest = seed;
}
}
nearests.push_back(nearest);
}
return nearests;
}
float grid1d(float q, int d) {
float step = pow(0.5, d + 1);
if (q < step)
return step;
if (q > 1 - step)
return 1 - step;
else
return round(q / step) * step;
}
Vec3f gridSeed(Vec3f q, int d) {
return {grid1d(q[0], d), grid1d(q[1], d), 0};
}
void genSeeds(int depth, vector<vector<Vec3f>> &seeds) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
for (int d=0; d<depth; d++) {
seeds.emplace_back();
for (int i=0; i<pow(2, d + 1) - 1; i++) {
for (int j=0; j<pow(2, d + 1) - 1; j++) {
seeds.back().push_back((float)pow(0.5, d + 1) * Vec3f({i + 0.75f + 0.5f * float(dis(gen)), j + 0.75f + 0.5f * float(dis(gen)), 0}));
}
}
}
}
class Image {
public:
int m_width, m_height;
vector<Vec3f> m_data; // Values between 0 and 1
Image() {
m_width = 0;
m_height = 0;
m_data = vector<Vec3f>();
}
Image(int w, int h) {
m_width = w;
m_height = h;
m_data = vector<Vec3f>();
m_data.assign(w * h, {0.0, 0.0, 0.0});
}
void write(const char *fname) {
int max_color = 255;
ofstream outfile;
outfile.open(fname, ios::out | ios::trunc );
outfile << "P3" << endl << m_width << " " << m_height << endl << max_color << endl;
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
Vec3f pxl = m_data.at(y * m_width + x);
int r = (int)(max_color * max(0.0f, min(1.0f, pxl[0])));
int g = (int)(max_color * max(0.0f, min(1.0f, pxl[1])));
int b = (int)(max_color * max(0.0f, min(1.0f, pxl[2])));
outfile << r << " " << g << " " << b << " ";
}
outfile << endl;
}
outfile.close();
}
void fillBackground(Vec3f color1={0.0, 0.0, 1.0}, Vec3f color2={1.0, 1.0, 1.0}) {
// #pragma omp parallel for
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
float t = (float)y / (float)m_height;
m_data[y * m_width + x] = {(1 - t) * color1[0] + t * color2[0],
(1 - t) * color1[1] + t * color2[1],
(1 - t) * color1[2] + t * color2[2]};
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere in file fname
// Uses styleBlit's brute force algorithm
void styleBlit(const char *fname, float threshold = 0.05) {
vector<bool> done = vector<bool>(m_width * m_height, false);
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
if (!done[y * m_width + x]) {
Vec3f normal = m_data.at(y * m_width + x);
if (normal[2] <= __FLT_EPSILON__) {// If there is no mesh there or only the ground (-1 or 0)
m_data[y * m_width + x] = {0, 0, 0};
done[y * m_width + x] = true;
}
else {
float abs_norm = 0.5f + normal[0] / 2.0f;
float ord_norm = 0.5f + normal[1] / 2.0f;
int abs = (int)floor(abs_norm * pngr.width);
int ord = (int)floor(ord_norm * pngr.height);
for (int y_src = 0; y_src < pngr.height; y_src++) {
for (int x_src = 0; x_src < pngr.width; x_src++) {
int x_new = x + x_src - abs;
int y_new = y + y_src - ord;
if (0<=y_new && y_new<m_height &&
0<=x_new && x_new<m_width &&
!done[y_new * m_width + x_new] &&
m_data[y_new * m_width + x_new][2]>__FLT_EPSILON__) {
Vec3f new_normal = m_data[y_new * m_width + x_new];
Vec3f src_normal = {float(x_src) / float(pngr.width) * 2.0f - 1.0f, float(y_src) / float(pngr.height) * 2.0f - 1.0f, 0};
float error = projectedDistance(new_normal[0],
new_normal[1],
src_normal[0],
src_normal[1]);
if (error < threshold) {
png_byte *src_pxl = &pngr.row_pointers[y_src][x_src * 4]; // *4 because of the number of channels
m_data[y_new * m_width + x_new] = {float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f};
done[y_new * m_width + x_new] = true;
}
}
}
}
}
}
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere in file fname
// Uses styleBlit's seeded version
void styleBlitTree(const char *fname, float threshold = 0.05, int depth=8, float ball_scale = 0.3) {
// Copy the guiding channels
vector<Vec3f> guides = this->m_data;
this->m_data.assign(m_width * m_height, {0, 0, 0});
vector<float> coefs = vector<float>();
coefs.assign(m_width * m_height, 0);
// Generate seeds
vector<vector<Vec3f>> seeds;
genSeeds(depth, seeds);
// Transfer style
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
Vec3f normal = guides[y * m_width + x];
if (normal[2] > __FLT_EPSILON__) {
Vec3f pos_01 = {float(x) / float(m_width), float(y) / float(m_height), 0};
function<bool(Vec3f)> test = [this, &guides](Vec3f s){return abs(guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))][2] - 1) < __FLT_EPSILON__;};
vector<Vec3f> ns = nearestSeeds(pos_01, seeds, test);
int depth = 0;
for (Vec3f s: ns) {
Vec3f target_seed_normal = guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))];
Vec3f src_seed_01 = (target_seed_normal + Vec3f(1.0f, 1.0f, 0)) / 2.0f;
Vec3f predicted_src_01 = src_seed_01 + (pos_01 - s) / (ball_scale * pow(0.9, depth));
Vec3f predicted_normal = {2.0f * float(predicted_src_01[0]) - 1.0f,
2.0f * float(predicted_src_01[1]) - 1.0f, 0};
float error = projectedDistance(normal, predicted_normal);
if (!isnan(error) && abs(target_seed_normal[2] - 1) < __FLT_EPSILON__) {
float coef = 1.0f / (1 + exp(200 * (error - threshold)));
Vec3i predicted_src = {int(predicted_src_01[0] * pngr.width), int(predicted_src_01[1] * pngr.height), 0};
png_byte *src_pxl = &pngr.row_pointers[predicted_src[1]][predicted_src[0] * 4]; // *4 because of the number of channels
this->m_data[y * m_width + x] += coef * (1 - coefs[y * m_width + x]) * Vec3f(float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f);
coefs[y * m_width + x] += coef * (1 - coefs[y * m_width + x]);
}
depth++;
}
                    if (coefs[y * m_width + x] > 0) // avoid 0/0 when no seed contributed
                        m_data[y * m_width + x] /= coefs[y * m_width + x];
}
}
}
}
// The image should be a normal map
// Style the image according to the lit sphere, but actually only store the coordinates of the source point to transfer in m_data
// Output should be passed through transferFrom afterwards
// Uses styleBlit's seeded version
void styleBlitTreeCoords(float threshold = 0.05, int depth=8, float ball_scale = 0.3) {
// Copy the guiding channels
vector<Vec3f> guides = this->m_data;
this->m_data.assign(m_width * m_height, {0, 0, 0});
// vector<float> coefs = vector<float>();
// coefs.assign(m_width * m_height, 0);
// Generate seeds
vector<vector<Vec3f>> seeds;
genSeeds(depth, seeds);
// Transfer style
// PNG_handler pngr = PNG_handler();
// pngr.read_png_file(fname);
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
Vec3f normal = guides[y * m_width + x];
if (normal[2] > __FLT_EPSILON__) {
Vec3f pos_01 = {float(x) / float(m_width), float(y) / float(m_height), 0};
function<bool(Vec3f)> test = [this, &guides](Vec3f s){return abs(guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))][2] - 1) < __FLT_EPSILON__;};
vector<Vec3f> ns = nearestSeeds(pos_01, seeds, test);
for (Vec3f s: ns) {
Vec3f target_seed_normal = guides[round(floor(s[1] * m_height) * m_width + floor(s[0] * m_width))];
Vec3f src_seed_01 = (target_seed_normal + Vec3f(1.0f, 1.0f, 0)) / 2.0f;
Vec3f predicted_src_01 = src_seed_01 + (pos_01 - s) / ball_scale;
Vec3f predicted_normal = {2.0f * float(predicted_src_01[0]) - 1.0f,
2.0f * float(predicted_src_01[1]) - 1.0f, 0};
float error = projectedDistance(normal, predicted_normal);
// cout << target_seed_normal << " | " << normal << " | " << s << " | " << pos_01 << " | " << src_seed_01 << " | " << predicted_normal << " | " << error << endl;
if (!isnan(error) && abs(target_seed_normal[2] - 1) < __FLT_EPSILON__ && error < threshold) {
// float coef = 1.0f / (1 + exp(5000 * (error - threshold)));
// Vec3i predicted_src = {int(predicted_src_01[0] * pngr.width), int(predicted_src_01[1] * pngr.height), 0};
// png_byte *src_pxl = &pngr.row_pointers[predicted_src[1]][predicted_src[0] * 4]; // *4 because of the number of channels
// this->m_data[y * m_width + x] = Vec3f(float((int)src_pxl[0]) / 255.0f,
// float((int)src_pxl[1]) / 255.0f,
// float((int)src_pxl[2]) / 255.0f);
this->m_data[y * m_width + x] = Vec3f(predicted_src_01[0], predicted_src_01[1], predicted_src_01[2]);
break;
// coefs[y * m_width + x] += coef * (1 - coefs[y * m_width + x]);
}
}
// m_data[y * m_width + x] /= coefs[y * m_width + x];
}
}
}
}
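    // Usage sketch for the coordinate-based pipeline (illustrative; the file names and
    // parameter values below are hypothetical, not taken from elsewhere in this code):
    //   Image img(512, 512);
    //   /* ... fill img.m_data with a normal map ... */
    //   img.styleBlitTreeCoords(0.05f, 8, 0.3f); // stores source coordinates in m_data
    //   img.transferFrom("litsphere.png");       // replaces coordinates with style colors
    //   img.write("stylized.ppm");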
// The image should be a normal map
// Style the image according to the lit sphere in file fname
void litSphere(const char *fname) {
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
for (int y = 0; y < m_height; y++) {
for (int x = 0; x < m_width; x++) {
Vec3f normal = m_data.at(y * m_width + x);
if (normal[2] <= __FLT_EPSILON__)
m_data[y * m_width + x] = {0, 0, 0};
else {
float abs_norm = 0.5f + normal[0] / 2.0f;
float ord_norm = 0.5f + normal[1] / 2.0f;
int abs = (int)floor(abs_norm * pngr.width);
int ord = (int)floor(ord_norm * pngr.height);
png_byte *src_pxl = &pngr.row_pointers[ord][abs * 4]; // *4 because of the number of channels
// printf("%4d, %4d = RGBA(%3d, %3d, %3d, %3d)\n", x, y, src_pxl[0], src_pxl[1], src_pxl[2], src_pxl[3]);
m_data[y * m_width + x] = {float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f};
}
}
}
}
void linearBlur(float radius) {
// Copy the image
vector<Vec3f> data_bak = this->m_data;
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
if (abs(m_data[y * m_width + x][2] - 1) < 1) {
Vec3f value = {0, 0, 0};
float coef = 0;
                    for (int y_off = -ceil(radius); y_off <= ceil(radius); y_off++) {
                        for (int x_off = -ceil(radius); x_off <= ceil(radius); x_off++) {
                            int y_nb = y + y_off;
                            int x_nb = x + x_off;
                            // Stay inside the image and inside the blur disc
                            if (0 <= y_nb && y_nb < m_height && 0 <= x_nb && x_nb < m_width &&
                                pow(x_off, 2) + pow(y_off, 2) < pow(radius, 2)) {
                                value += data_bak[y_nb * m_width + x_nb];
                                coef += 1;
                            }
                        }
                    }
m_data[y * m_width + x] = value / coef;
}
}
}
}
void transferFrom(const char *fname) {
PNG_handler pngr = PNG_handler();
pngr.read_png_file(fname);
#pragma omp parallel for
for (int y = 0; y < m_height; y++) {
#pragma omp parallel for
for (int x = 0; x < m_width; x++) {
if (abs(m_data[y * m_width + x][2] - 1) < 0.9) {
png_byte *src_pxl = &pngr.row_pointers[int(round(m_data[y * m_width + x][1] * pngr.height))][int(round(m_data[y * m_width + x][0] * pngr.width)) * 4]; // *4 because of the number of channels
this->m_data[y * m_width + x] = Vec3f(float((int)src_pxl[0]) / 255.0f,
float((int)src_pxl[1]) / 255.0f,
float((int)src_pxl[2]) / 255.0f);
}
}
}
}
};
|
omp_taskwait.c | <ompts:test>
<ompts:testdescription>Test which checks the omp taskwait directive. First we generate a set of tasks which set the elements of an array to a specific value. Then we do a taskwait and check whether all tasks finished, i.e. whether all array elements contain the right value. Then we generate a second set of tasks setting the array elements to another value. After the parallel region we check whether all tasks of the second set finished and were executed after the tasks of the first set.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp taskwait</ompts:directive>
<ompts:dependences>omp single</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int <ompts:testcode:functionname>omp_taskwait</ompts:testcode:functionname>(FILE * logFile){
	int result1 = 0;		/* Stores the number of tasks that had not finished after the taskwait */
	int result2 = 0;		/* Stores the number of wrong array elements at the end */
int array[NUM_TASKS];
int i;
/* fill array */
for (i = 0; i < NUM_TASKS; i++)
array[i] = 0;
#pragma omp parallel
{
#pragma omp single
{
for (i = 0; i < NUM_TASKS; i++) {
/* First we have to store the value of the loop index in a new variable
* which will be private for each task because otherwise it will be overwritten
* if the execution of the task takes longer than the time which is needed to
* enter the next step of the loop!
*/
int myi;
myi = i;
#pragma omp task
{
my_sleep (SLEEPTIME);
array[myi] = 1;
} /* end of omp task */
} /* end of for */
<ompts:orphan>
<ompts:check>#pragma omp taskwait</ompts:check>
</ompts:orphan>
/* check if all tasks were finished */
for (i = 0; i < NUM_TASKS; i++)
if (array[i] != 1)
result1++;
/* generate some more tasks which now shall overwrite
* the values in the tids array */
for (i = 0; i < NUM_TASKS; i++) {
int myi;
myi = i;
#pragma omp task
{
array[myi] = 2;
} /* end of omp task */
} /* end of for */
} /* end of single */
} /*end of parallel */
/* final check, if all array elements contain the right values: */
for (i = 0; i < NUM_TASKS; i++) {
if (array[i] != 2)
result2++;
}
return ((result1 == 0) && (result2 == 0));
}
</ompts:testcode>
</ompts:test>
|
sgbsv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbsv.c, normal z -> s, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbsv
*
* Computes the solution to a system of linear equations A * X = B,
* using the LU factorization computed by plasma_sgbtrf.
*
*******************************************************************************
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
 * @param[in,out] AB
 *          On entry, the matrix A in band storage.
 *          On exit, details of the LU factorization of the band matrix A,
 *          as computed by plasma_sgbtrf.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= 1+kl+ku.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
******************************************************************************/
int plasma_sgbsv(int n, int kl, int ku, int nrhs,
float *pAB, int ldab, int *ipiv,
float *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (n < 0) {
plasma_error("illegal value of n");
return -1;
}
if (kl < 0) {
plasma_error("illegal value of kl");
return -2;
}
if (ku < 0) {
plasma_error("illegal value of ku");
return -3;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -4;
}
if (ldab < imax(1, 1+kl+ku)) {
plasma_error("illegal value of ldab");
return -6;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -8;
}
// quick return
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_gbtrf(plasma, PlasmaRealFloat, n, kl+ku+1);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize barrier.
plasma_barrier_init(&plasma->barrier);
// Create tile matrix.
plasma_desc_t AB;
plasma_desc_t B;
int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
int tkl = (kl+nb-1)/nb; // number of tiles in lower band (not including diagonal)
int lm = (tku+tkl+1)*nb; // since we use sgetrf on panel, we pivot back within panel.
// this could fill the last tile of the panel,
// and we need extra NB space on the bottom
int retval;
retval = plasma_desc_general_band_create(PlasmaRealFloat, PlasmaGeneral,
nb, nb, lm, n, 0, 0, n, n, kl, ku, &AB);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&AB);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_spb2desc(pAB, ldab, AB, &sequence, &request);
plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);
}
#pragma omp parallel
#pragma omp master
{
// Call the tile async function.
plasma_omp_sgbsv(AB, ipiv, B, &sequence, &request);
}
#pragma omp parallel
#pragma omp master
{
// Translate back to LAPACK layout.
plasma_omp_sdesc2pb(AB, pAB, ldab, &sequence, &request);
plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
}
// Free matrices in tile layout.
plasma_desc_destroy(&B);
plasma_desc_destroy(&AB);
// Return status.
int status = sequence.status;
return status;
}
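/*
 * Minimal usage sketch for plasma_sgbsv (illustrative only, not part of the
 * PLASMA sources). It shows the argument order and the ldab >= 1+kl+ku
 * requirement checked above; the plasma_init/plasma_finalize calls, the
 * availability of <stdlib.h>, and the band-storage fill are assumptions
 * following the surrounding library conventions.
 */
#if 0
static int example_sgbsv_usage(void)
{
    int n = 1000, kl = 1, ku = 1, nrhs = 1;
    int ldab = kl + ku + 1;                 /* satisfies ldab >= 1+kl+ku */
    int ldb  = n;
    float *AB   = (float*) calloc((size_t)ldab*(size_t)n,   sizeof(float));
    float *B    = (float*) calloc((size_t)ldb*(size_t)nrhs, sizeof(float));
    int   *ipiv = (int*)   calloc((size_t)n,                sizeof(int));
    /* ... fill AB (band storage) and B with the actual problem data ... */
    plasma_init();
    int info = plasma_sgbsv(n, kl, ku, nrhs, AB, ldab, ipiv, B, ldb);
    plasma_finalize();
    free(AB); free(B); free(ipiv);
    return info;                            /* PlasmaSuccess (0) on success */
}
#endif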
/***************************************************************************//**
*
* Computes the solution to a system of linear equations A * X = B,
* using the LU factorization computed by plasma_sgbtrf.
* Non-blocking tile version of plasma_sgbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* Descriptor of right-hand-sides B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
******************************************************************************/
void plasma_omp_sgbsv(plasma_desc_t AB, int *ipiv, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(AB) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid AB");
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (AB.n == 0 || B.n == 0)
return;
// Call the parallel function.
plasma_psgbtrf(AB, ipiv, sequence, request);
plasma_pstbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
PlasmaUnit,
1.0, AB,
B,
ipiv,
sequence, request);
plasma_pstbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
PlasmaNonUnit,
1.0, AB,
B,
ipiv,
sequence, request);
}
|
GB_binop__isgt_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isgt_uint64
// A.*B function (eWiseMult): GB_AemultB__isgt_uint64
// A*D function (colscale): GB_AxD__isgt_uint64
// D*A function (rowscale): GB_DxB__isgt_uint64
// C+=B function (dense accum): GB_Cdense_accumB__isgt_uint64
// C+=b function (dense accum): GB_Cdense_accumb__isgt_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_uint64
// C=scalar+B GB_bind1st__isgt_uint64
// C=scalar+B' GB_bind1st_tran__isgt_uint64
// C=A+scalar GB_bind2nd__isgt_uint64
// C=A'+scalar GB_bind2nd_tran__isgt_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
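// example: with aij = 5 and bij = 3, cij = (5 > 3) = 1 (stored as uint64_t);
// with aij = 3 and bij = 5, cij = 0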
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT64 || GxB_NO_ISGT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isgt_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isgt_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isgt_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isgt_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__isgt_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isgt_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isgt_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isgt_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB_bind1st_tran__isgt_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB_bind2nd_tran__isgt_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
MRF-GCO.h | #pragma once
#include "BaseMRF.h"
#include "Log.h"
#include "GCoptimization.h"
#include <vector>
#include <map>
//#include <google/heap-profiler.h>
#include <limits.h>
#include <time.h>
#include <vector>
namespace SRS{
/** \brief
* Wrapper for Olga Vekslers Multilabel graph cut library
*/
template<class TGraphModel>
class GCO_SRSMRFSolver :public BaseMRFSolver<TGraphModel>{
public:
//typedef short EnergyType;
typedef float EnergyType;
typedef GCO_SRSMRFSolver<TGraphModel> Self;
typedef TGraphModel GraphModelType;
typedef typename GraphModelType::Pointer GraphModelPointerType;
static const int D = GraphModelType::ImageType::ImageDimension;
typedef GCoptimizationGeneralGraph MRFType;
typedef EnergyType (* SmoothFn)(int s1,int s2,int l1, int l2);
protected:
MRFType * m_optimizer;
double m_unaryRegistrationWeight,m_unarySegmentationWeight;
static double m_pairwiseSegmentationRegistrationWeight,m_pairwiseRegistrationWeight,m_pairwiseSegmentationWeight;
int verbose;
int nNodes, nRegNodes, nSegNodes, nEdges;
clock_t m_start;
int nRegLabels;
int nSegLabels;
bool m_segment, m_register,m_coherence;
double m_lastLowerBound;
std::vector<int> m_labelOrder;
int m_zeroDisplacementLabel;
bool m_deleteRegNeighb;
//ugly static members because of GCO
static std::vector<std::vector<std::vector<std::map<int,float> > > > (*regPairwise);
static std::vector<std::vector<std::vector<float > > > *srsPairwise;
static std::vector<std::vector<std::vector<std::vector<float> > > > *segPairwise;
static int S0,S1;
static int GLOBALnRegNodes,GLOBALnSegNodes,GLOBALnSegLabels,GLOBALnRegLabels;
static GraphModelPointerType m_GraphModel;
static bool m_cachePotentials;
static int getRelativeNodeIndex(int idx1,int idx2, int S0, int S1=-1 ){
//returns the neighbor direction.
//idx1= d0 + d1*S0 + d2*S0*S1
//idx2 is either
//1 d0+1 + d1 *S0 + d2 *S0*S1
//2 d0 + (d1+1)*S0 + d2 *S0*S1
//3 d0 + d1 *S0 + (d2+1)*S0*S1
int diff = idx2-idx1;
if (diff == 1 ) return 0;
if (diff == S0 ) return 1;
if (diff == S0*S1) return 2;
            std::cerr<<"Error! idx difference doesn't make sense: "<<VAR(diff)<<std::endl;
return -1;
}
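        // Worked example (illustrative): with S0=4 and S1=3, node idx1 = 1 + 2*4 = 9 has
        // +x neighbour idx2 = 10 (diff 1 -> direction 0), +y neighbour idx2 = 13
        // (diff S0=4 -> direction 1) and +z neighbour idx2 = 21 (diff S0*S1=12 -> direction 2).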
//neighbor structure for GCO
int * m_numberOfNeighborsofEachNode;
int ** m_neighbourArray,*m_segNeighbors,*m_regNeighbors;
EnergyType ** m_weights,*m_segWeights,*m_regWeights;
public:
static EnergyType GLOBALsmoothFunction(int node1, int node2, int label1, int label2){
float pot=-1;
if (node1>node2){
int tmp=node1; node1=node2; node2=tmp;
tmp=label1; label1=label2; label2=tmp;
}
if (m_cachePotentials){
if (node1>=GLOBALnRegNodes && node2>=GLOBALnRegNodes){
//segmentation pairwise!
if ( (label1<GLOBALnRegLabels) || (label2<GLOBALnRegLabels) ){
pot=0.0;
}else{
pot=(*segPairwise)[label1-GLOBALnRegLabels][label2-GLOBALnRegLabels][node1-GLOBALnRegNodes][getRelativeNodeIndex(node1-GLOBALnRegNodes,node2-GLOBALnRegNodes,S0,S1)];
}
}else if (node1<GLOBALnRegNodes && node2<GLOBALnRegNodes){
//registration pairwise!
if (label2>=GLOBALnRegLabels || label1>=GLOBALnRegLabels){
pot=0.0;
}else{
pot=(*regPairwise)[label1][label2][node1][node2];
}
}else{
//srs pairwise
if (label2<GLOBALnRegLabels || label1>=GLOBALnRegLabels){
//impossible labelling, either regnode getting assigned a seglabel, or vice versa
pot=100000;
}else {
pot=(*srsPairwise)[label2-GLOBALnRegLabels][label1][node2-GLOBALnRegNodes];
}
}
}else{
if (node1>=GLOBALnRegNodes && node2>=GLOBALnRegNodes){
//segmentation pairwise!
if ( (label1<GLOBALnRegLabels) || (label2<GLOBALnRegLabels) ){
pot=0.0;
}else{
pot = m_pairwiseSegmentationWeight*m_GraphModel->getPairwiseSegmentationPotential(node1-GLOBALnRegNodes,node2-GLOBALnRegNodes,label1-GLOBALnRegLabels,label2-GLOBALnRegLabels);
}
}else if (node1<GLOBALnRegNodes && node2<GLOBALnRegNodes){
//registration pairwise!
if (label2>=GLOBALnRegLabels || label1>=GLOBALnRegLabels){
pot=0.0;
}else{
pot=m_pairwiseRegistrationWeight*m_GraphModel->getPairwiseRegistrationPotential(node1,node2,label1,label2);
}
}else{
//srs pairwise
if (label2<GLOBALnRegLabels || label1>=GLOBALnRegLabels){
//impossible labelling, either regnode getting assigned a seglabel, or vice versa
pot=100000;
}else {
pot=m_pairwiseSegmentationRegistrationWeight*m_GraphModel->getPairwiseRegSegPotential(node2-GLOBALnRegNodes,label1,label2-GLOBALnRegLabels);
}
}
}
//LOGV(10)<<VAR(EnergyType(MULTIPLIER*pot))<<endl;
return EnergyType(pot);
}
public:
GCO_SRSMRFSolver(GraphModelPointerType graphModel,
double unaryRegWeight=1.0,
double pairwiseRegWeight=1.0,
double unarySegWeight=1.0,
double pairwiseSegWeight=1.0,
double pairwiseSegRegWeight=1.0,
int vverbose=false)
{
m_GraphModel=graphModel;
verbose=vverbose;
m_unarySegmentationWeight=unarySegWeight;
m_pairwiseSegmentationWeight=pairwiseSegWeight;
m_unaryRegistrationWeight=unaryRegWeight;
m_pairwiseRegistrationWeight=pairwiseRegWeight;
m_pairwiseSegmentationRegistrationWeight=pairwiseSegRegWeight;
//createGraph();
//init();
m_optimizer=NULL;
regPairwise=NULL;
segPairwise=NULL;
srsPairwise=NULL;
m_labelOrder=std::vector<int>(this->m_GraphModel->nRegLabels());
//order registration labels such that they start with zero displacement
m_zeroDisplacementLabel=this->m_GraphModel->getLabelMapper()->getZeroDisplacementIndex();
m_labelOrder[0]=m_zeroDisplacementLabel;
for (int l=0;l<(this->m_GraphModel->nRegLabels());++l){
if (l < m_labelOrder[0])
m_labelOrder[l+1]=l;
else if (l>m_labelOrder[0])
m_labelOrder[l]=l;
}
srand ( time(NULL) );
m_cachePotentials=false;
m_deleteRegNeighb=false;
}
GCO_SRSMRFSolver() {
}
~GCO_SRSMRFSolver()
{
            LOGV(1)<<"Deleting GCO_MRF Solver " << std::endl;
if (m_register) delete regPairwise;
if (m_segment) delete segPairwise;
if (m_coherence) delete srsPairwise;
if (m_numberOfNeighborsofEachNode){
delete [] m_numberOfNeighborsofEachNode;
#ifdef ALLOCINDIVIDUAL
for (int i = 0;i< GLOBALnRegNodes+GLOBALnSegNodes; ++i){
if ( m_neighbourArray[i]!=NULL ) delete [] m_neighbourArray[i];
if ( m_weights[i] !=NULL)delete [] m_weights[i];
}
#else
if (m_register){
if (m_deleteRegNeighb){
for (int i = 0;i< GLOBALnRegNodes; ++i){
delete [] m_neighbourArray[i];
delete [] m_weights[i];
}
}
else{
delete [] m_regNeighbors;
delete [] m_regWeights;
}
}
if (m_segment){
delete [] m_segNeighbors;
delete [] m_segWeights;
}
#endif
delete [] m_neighbourArray;
delete [] m_weights;
}
delete m_optimizer;
}
virtual void setPotentialCaching(bool enableCaching){m_cachePotentials=enableCaching;}
virtual void createGraph(){
clock_t start = clock();
{
m_segment=false;
m_register=false;
}
LOGV(1)<<"starting graph init"<<std::endl;
this->m_GraphModel->Init();
clock_t endUnary = clock();
double t1 = (float) ((double)(endUnary - start) / CLOCKS_PER_SEC);
tUnary+=t1;
nNodes=this->m_GraphModel->nNodes();
nEdges=this->m_GraphModel->nEdges();
nRegNodes=this->m_GraphModel->nRegNodes();
nSegNodes=this->m_GraphModel->nSegNodes();
m_start=start;
int edgeCount=0;
nRegLabels=this->m_GraphModel->nRegLabels();
nSegLabels=this->m_GraphModel->nSegLabels();
m_register=((m_pairwiseSegmentationRegistrationWeight>0 || m_unaryRegistrationWeight>0 || m_pairwiseRegistrationWeight>0) && nRegLabels>1);
m_segment=((m_pairwiseSegmentationRegistrationWeight>0 || m_unarySegmentationWeight>0 || m_pairwiseSegmentationWeight) && nSegLabels>1);
m_coherence=m_pairwiseSegmentationRegistrationWeight>0;
GLOBALnRegNodes= m_register*nRegNodes;
GLOBALnSegNodes= m_segment*nSegNodes;
GLOBALnRegLabels=m_register*nRegLabels;
GLOBALnSegLabels=m_segment*nSegLabels;
LOGV(5)<<VAR(GLOBALnRegNodes)<<" "<<VAR(GLOBALnRegLabels)<<" "<<VAR(GLOBALnSegNodes)<<" "<<VAR(GLOBALnSegLabels)<<std::endl;
if (m_optimizer) delete m_optimizer;
m_optimizer= new MRFType(GLOBALnSegNodes+GLOBALnRegNodes,GLOBALnRegLabels+GLOBALnSegLabels);
//set global size variables :(
{
S0=this->m_GraphModel->getImageSize()[0];
S1=this->m_GraphModel->getImageSize()[1];
}
//allocate neighbor structs
m_numberOfNeighborsofEachNode = new int[GLOBALnRegNodes+GLOBALnSegNodes];
memset(m_numberOfNeighborsofEachNode,0,(GLOBALnRegNodes+GLOBALnSegNodes)*sizeof(int));
#ifdef ALLOCINDIVIDUAL
m_neighbourArray = new int *[GLOBALnRegNodes+GLOBALnSegNodes];
        memset(m_neighbourArray, 0, (GLOBALnRegNodes+GLOBALnSegNodes)*(sizeof(int*)));
        m_weights= new EnergyType *[GLOBALnRegNodes+GLOBALnSegNodes];
        memset(m_weights, 0, (GLOBALnRegNodes+GLOBALnSegNodes)*(sizeof(EnergyType*)));
#else
m_neighbourArray = new int *[GLOBALnRegNodes+GLOBALnSegNodes];
m_weights= new EnergyType *[GLOBALnRegNodes+GLOBALnSegNodes];
if (m_register){
int nNeighbors=2*D+m_coherence*this->m_GraphModel->getMaxRegSegNeighbors();
LOGV(6)<<VAR(nNeighbors)<<" max neighbors per registration node" <<std::endl;
if (nNeighbors<10){
m_regNeighbors = new int[GLOBALnRegNodes*nNeighbors];
memset(m_regNeighbors,0,GLOBALnRegNodes*nNeighbors*(sizeof(int)));
m_regWeights = new EnergyType[GLOBALnRegNodes*nNeighbors];
memset(m_regWeights,1,GLOBALnRegNodes*nNeighbors*(sizeof(EnergyType)));
for (int i=0;i<GLOBALnRegNodes;++i){
m_neighbourArray[i]=&m_regNeighbors[i*nNeighbors];
m_weights[i]=&m_regWeights[i*nNeighbors];
}
LOGV(6)<<"memory for reg neighbors primary structure : "<<1.0*GLOBALnRegNodes*nNeighbors*sizeof(int*)/1024/1024 <<"MB"<<std::endl;
}else{
LOGV(6)<<"allocating memory for registration node adjacency matrix individually.."<<std::endl;
for (int i=0;i<GLOBALnRegNodes;++i){
int nLocalNeighbors=2*D+this->m_GraphModel->getRegSegNeighbors(i).size();
m_neighbourArray[i]=new int[nLocalNeighbors];
m_weights[i]=new EnergyType[nLocalNeighbors];
}
m_deleteRegNeighb=true;
}
}
if (m_segment){
int nNeighbors=2*D+m_coherence*1;
m_segNeighbors = new int[GLOBALnSegNodes*nNeighbors];
m_segWeights = new EnergyType[GLOBALnSegNodes*nNeighbors];
memset(m_segNeighbors,0,GLOBALnSegNodes*nNeighbors*(sizeof(int)));
memset(m_segWeights,1,GLOBALnSegNodes*nNeighbors*(sizeof(EnergyType)));
for (int i=0;i<GLOBALnSegNodes;++i){
m_neighbourArray[i+GLOBALnRegNodes]=&m_segNeighbors[i*nNeighbors];
m_weights[i+GLOBALnRegNodes]=&m_segWeights[i*nNeighbors];
}
LOGV(6)<<"memory for seg neighbors primary structure : "<<1.0*GLOBALnSegNodes*nNeighbors*sizeof(int*)/1024/1024 <<"MB"<<std::endl;
}
#endif
logSetStage("Potential Functions");
// traverse grid
if ( m_register){
//RegUnaries
clock_t startUnary = clock();
//now compute&set all potentials
if (m_unaryRegistrationWeight>0){
//#pragma omp parallel for
//theoretically, this computation could be parallelized
//however, the computation/caching of the registration potentials is not thread safe at the moment
for (int l1=0;l1<nRegLabels;++l1)
{
int regLabel=m_labelOrder[l1];
std::vector<GCoptimization::SparseDataCost> costs(nRegNodes);
this->m_GraphModel->cacheRegistrationPotentials(regLabel);
for (int d=0;d<nRegNodes;++d){
costs[d].site=d;
costs[d].cost=m_unaryRegistrationWeight*this->m_GraphModel->getUnaryRegistrationPotential(d,regLabel);
if (m_coherence && !m_segment){
//pretty inefficient as the reg neighbors are recomputed #registrationLabels times for each registration node.
std::vector<int> regSegNeighbors=this->m_GraphModel->getRegSegNeighbors(d);
int nNeighbours=regSegNeighbors.size();
if (nNeighbours==0) {LOG<<"ERROR: node "<<d<<" seems to have no neighbors."<<std::endl;}
for (int i=0;i<nNeighbours;++i){
double coherencePot=m_pairwiseSegmentationRegistrationWeight*this->m_GraphModel->getPairwiseRegSegPotential(d,regSegNeighbors[i],regLabel,0);
LOGV(8)<<VAR(d)<<" "<<VAR(i)<<" "<<VAR(coherencePot)<<" "<<VAR(m_pairwiseSegmentationRegistrationWeight)<<std::endl;
costs[d].cost+=coherencePot;
}
}
}
m_optimizer->setDataCost(regLabel,&costs[0],nRegNodes);
}
}
clock_t endUnary = clock();
double t = (float) ((double)(endUnary - startUnary) / CLOCKS_PER_SEC);
LOGV(1)<<"Registration Unaries took "<<t<<" seconds."<<std::endl;
tUnary+=t;
// Pairwise potentials
if (m_cachePotentials)
regPairwise= new std::vector<std::vector<std::vector<std::map<int,float> > > > (nRegLabels,std::vector<std::vector<std::map<int,float> > >(nRegLabels,std::vector<std::map<int,float> > (nRegNodes) ) );
for (int d=0;d<nRegNodes;++d){
m_optimizer->setLabel(d,m_zeroDisplacementLabel);
{//pure Registration
std::vector<int> neighbours= this->m_GraphModel->getForwardRegistrationNeighbours(d);
int nNeighbours=neighbours.size();
for (int i=0;i<nNeighbours;++i){
//LOG<<d<<" "<<regNodes[d]<<" "<<i<<" "<<neighbours[i]<<std::endl;
//m_optimizer->setNeighbors(d,neighbours[i],1);
addNeighbor(d,neighbours[i],m_numberOfNeighborsofEachNode,m_neighbourArray,m_weights);
if (m_cachePotentials){
for (int l1=0;l1<nRegLabels;++l1){
for (int l2=0;l2<nRegLabels;++l2){
if (m_pairwiseRegistrationWeight>0)
(*regPairwise)[l1][l2][d][neighbours[i]] = m_pairwiseRegistrationWeight*this->m_GraphModel->getPairwiseRegistrationPotential(d,neighbours[i],l1,l2);
else
(*regPairwise)[l1][l2][d][neighbours[i]] = 0.0;
}
}
}
edgeCount++;
}
}
}
clock_t endPairwise = clock();
t = (float) ((double)(endPairwise-endUnary ) / CLOCKS_PER_SEC);
LOGV(1)<<"Registration pairwise took "<<t<<" seconds."<<std::endl;
LOGV(1)<<"Approximate size of reg pairwise: "<<1.0/(1024*1024)*nRegNodes*nRegLabels*nRegLabels*sizeof(double)*m_cachePotentials<<" mb."<<std::endl;
tPairwise+=t;
}
if (m_segment){
//SegUnaries
clock_t startUnary = clock();
for (int l1=0;l1<nSegLabels;++l1)
{
//LOGV(4)<<"Allocating seg unaries for label "<<l1<<", using "<<1.0*nSegNodes*sizeof( GCoptimization::SparseDataCost ) /(1024*1024)<<" mb memory"<<std::std::endl;
std::vector<GCoptimization::SparseDataCost> costas(nSegNodes);
int c=0;
for (int d=0;d<nSegNodes;++d){
double unarySegCost=this->m_GraphModel->getUnarySegmentationPotential(d,l1);
if ( unarySegCost<10000){
costas[c].cost=m_unarySegmentationWeight*unarySegCost;
LOGV(10)<<"node "<<d<<"; seg unary label: "<<l1<<" "<<m_unarySegmentationWeight*this->m_GraphModel->getUnarySegmentationPotential(d,l1)<<std::endl;
costas[c].site=d+GLOBALnRegNodes;
if (m_coherence && !m_register){
double coherenceCost=m_pairwiseSegmentationRegistrationWeight*this->m_GraphModel->getPairwiseRegSegPotential(d,0,l1);
LOGV(8)<<VAR(d)<<" "<<VAR(coherenceCost)<<" "<<VAR(m_pairwiseSegmentationRegistrationWeight)<<std::endl;
costas[c].cost+=coherenceCost;
}
++c;
}
}
costas.resize(c);
LOGV(2)<<"Number of nodes with segmentation label "<<l1<<": :"<<c<<std::endl;
m_optimizer->setDataCost(l1+GLOBALnRegLabels,&costas[0],c);
}
clock_t endUnary = clock();
double t = (float) ((double)(endUnary - startUnary) / CLOCKS_PER_SEC);
LOGV(1)<<"Segmentation Unaries took "<<t<<" seconds."<<std::endl;
LOGV(1)<<"Approximate size of seg unaries: "<<1.0/(1024*1024)*nSegNodes*nSegLabels*sizeof(double)<<" mb."<<std::endl;
int nSegEdges=0,nSegRegEdges=0;
//Segmentation smoothness cache
if (m_cachePotentials){
segPairwise= new std::vector<std::vector<std::vector<std::vector<float> > > > (GLOBALnSegLabels,std::vector<std::vector<std::vector<float> > >(GLOBALnSegLabels,std::vector< std::vector<float> > (GLOBALnSegNodes,std::vector<float> (D)) ) );
srsPairwise= new std::vector<std::vector<std::vector<float > > > (GLOBALnSegLabels,std::vector<std::vector<float > >(GLOBALnRegLabels,std::vector<float>(GLOBALnSegNodes) ) );
}
for (int d=0;d<nSegNodes;++d){
int initLabel= this->m_GraphModel->GetTargetSegmentationAtIdx(d);
m_optimizer->setLabel(d+GLOBALnRegNodes,initLabel+GLOBALnRegLabels);
//pure Segmentation
std::vector<int> neighbours= this->m_GraphModel->getForwardSegmentationNeighbours(d);
int nNeighbours=neighbours.size();
for (int i=0;i<nNeighbours;++i){
nSegEdges++;
//m_optimizer->setNeighbors(d+GLOBALnRegNodes,neighbours[i]+GLOBALnRegNodes,1);
addNeighbor(d+GLOBALnRegNodes,neighbours[i]+GLOBALnRegNodes,m_numberOfNeighborsofEachNode,m_neighbourArray,m_weights);
edgeCount++;
if (m_cachePotentials){
for (int l1=0;l1<nSegLabels;++l1){
for (int l2=0;l2<nSegLabels;++l2){
LOGV(25)<<VAR(d)<<" "<<VAR(l1)<<" "<<VAR(neighbours[i])<<" "<<l2<<std::endl;
if (m_pairwiseSegmentationWeight>0){
(*segPairwise)[l1][l2][d][i] = m_pairwiseSegmentationWeight*this->m_GraphModel->getPairwiseSegmentationPotential(d,neighbours[i],l1,l2);
}else{
(*segPairwise)[l1][l2][d][i] = 0.0;
}
}
}
}
}
if (m_register && m_coherence){
std::vector<int> segRegNeighbors=this->m_GraphModel->getSegRegNeighbors(d);
nNeighbours=segRegNeighbors.size();
if (nNeighbours==0) {LOG<<"ERROR: node "<<d<<" seems to have no neighbors."<<std::endl;}
for (int i=0;i<nNeighbours;++i){
//m_optimizer->setNeighbors(d+GLOBALnRegNodes,segRegNeighbors[i],1);
addNeighbor(d+GLOBALnRegNodes,segRegNeighbors[i],m_numberOfNeighborsofEachNode,m_neighbourArray,m_weights);
edgeCount++;
if (m_cachePotentials){
nSegRegEdges++;
for (int l1=0;l1<nSegLabels;++l1){
for (int l2=0;l2<nRegLabels;++l2){
//forward
LOGV(25)<<VAR(d)<<" "<<VAR(l1)<<" "<<VAR(segRegNeighbors[i])<<" "<<VAR(l2)<<std::endl;
if (m_pairwiseSegmentationRegistrationWeight>0){
(*srsPairwise)[l1][l2][d]=m_pairwiseSegmentationRegistrationWeight*this->m_GraphModel->getPairwiseRegSegPotential(segRegNeighbors[i],d,l2,l1);
}else{
(*srsPairwise)[l1][l2][d]=0.0;
}
}
}
}
}
}
}
clock_t endPairwise = clock();
t = (float) ((double)(endPairwise-endUnary ) / CLOCKS_PER_SEC);
LOGV(1)<<"Segmentation + SRS pairwise took "<<t<<" seconds."<<std::endl;
LOGV(1)<<"Approximate size of seg pairwise: "<<1.0/(1024*1024)*nSegEdges*nSegLabels*nSegLabels*sizeof(double)*m_cachePotentials<<" mb."<<std::endl;
LOGV(1)<<"Approximate size of SRS pairwise: "<<1.0/(1024*1024)*nSegRegEdges*nSegLabels*nRegLabels*sizeof(double)*m_cachePotentials<<" mb."<<std::endl;
}
m_optimizer->setSmoothCost(&GLOBALsmoothFunction);
m_optimizer->setAllNeighbors(m_numberOfNeighborsofEachNode,m_neighbourArray,m_weights);
clock_t finish = clock();
double t = (float) ((double)(finish - start) / CLOCKS_PER_SEC);
//tInterpolation+=t;
LOGV(1)<<"Finished init after "<<t<<" seconds"<<std::endl;
nEdges=edgeCount;
logResetStage;
std::vector<int> order(GLOBALnRegLabels+GLOBALnSegLabels);
for (int l=0;l<GLOBALnSegLabels;++l){
order[l]=GLOBALnRegLabels+l;
}
for (int l=0;l<GLOBALnRegLabels;++l){
order[l+GLOBALnSegLabels]=l;
}
#if 1
m_optimizer->setLabelOrder(&order[0],GLOBALnRegLabels+GLOBALnSegLabels);
#else
bool random = true;
m_optimizer->setLabelOrder(random);
#endif
int verbosity=0;
if (verbose>7)
verbosity=2;
else if (verbose>3)
verbosity=1;
m_optimizer->setVerbosity(verbosity);
}
virtual double optimize(int maxIter=20){
logSetStage("GC-Optimizer");
clock_t opt_start=clock();
double energy;//=m_optimizer->compute_energy();
//LOGV(2)<<VAR(energy)<<std::endl;
try{
m_optimizer->expansion(maxIter==0?-1:maxIter);
//m_optimizer->swap(maxIter);
}catch (GCException e){
e.Report();
}
energy=m_optimizer->compute_energy();
clock_t finish = clock();
tOpt+=((double)(finish-opt_start)/CLOCKS_PER_SEC);
float t = (float) ((double)(finish - m_start) / CLOCKS_PER_SEC);
LOG<<"Finished optimization after "<<t<<" , resulting energy is "<<energy<<std::endl;
logResetStage;
return energy;
}
virtual double optimizeOneStep(int currentIter , bool & converged){
clock_t opt_start=clock();
double energy;//=
//LOGV(2)<<VAR(energy)<<std::endl;
try{
m_optimizer->expansion(1);
//m_optimizer->swap(maxIter);
}catch (GCException e){
e.Report();
}
energy=m_optimizer->compute_energy();
clock_t finish = clock();
tOpt+=((double)(finish-opt_start)/CLOCKS_PER_SEC);
float t = (float) ((double)(finish - m_start) / CLOCKS_PER_SEC);
LOG<<VAR(currentIter)<<" Finished optimization after "<<t<<" , resulting energy is "<<energy<<std::endl;
if (currentIter>0){
converged= (converged || (fabs(this->m_lastLowerBound-energy) < 1e-6 * this->m_lastLowerBound ));
}
//misuse member variable for storing last energy
this->m_lastLowerBound=energy;
return -1;
}
virtual std::vector<int> getDeformationLabels(){
std::vector<int> Labels(nRegNodes,0);
if (m_register){
for (int i=0;i<nRegNodes;++i){
Labels[i]=m_optimizer->whatLabel(i);
LOGV(20)<<"DEF "<<VAR(i)<<" "<<VAR(Labels[i])<<std::endl;
}
}
return Labels;
}
virtual std::vector<int> getSegmentationLabels(){
std::vector<int> Labels(nSegNodes,0);
if (m_segment){
for (int i=0;i<nSegNodes;++i){
Labels[i]=m_optimizer->whatLabel(i+GLOBALnRegNodes)-GLOBALnRegLabels;
Labels[i]=Labels[i]>0?Labels[i]:0;
LOGV(20)<<"SEG "<<VAR(i)<<" "<<VAR(Labels[i])<<std::endl;
}
}
return Labels;
}
void evalSolution(){
LOG<<"NYI"<<std::endl;
}
void addNeighbor(int id1, int id2, int * neighbCount, int **neighbors, EnergyType ** weights){
LOGV(15)<<"Adding neighbors "<<id1<<" "<<id2<<" with counts "<<VAR(neighbCount[id1])<< " "<<VAR(neighbCount[id2])<<std::endl;
//allocate memory if not yet allocated
#ifdef ALLOCINDIVIDUAL
if (neighbCount[id1] == 0){
int nNeighbors=2*D;
if (id1>=GLOBALnRegLabels && m_coherence){
nNeighbors+=1;
}else if (id1<GLOBALnRegLabels && m_coherence){
nNeighbors+=this->m_GraphModel->getRegSegNeighbors(id1).size();
}
neighbors[id1]=new int[nNeighbors];
weights[id1]=new EnergyType[nNeighbors];
LOGV(15)<<"allocated id1"<<std::endl;
}
if (neighbCount[id2] == 0){
int nNeighbors=2*D;
if (id2>=GLOBALnRegLabels && m_coherence){
nNeighbors+=1;
}else if (id2<GLOBALnRegLabels && m_coherence){
nNeighbors+=this->m_GraphModel->getRegSegNeighbors(id2).size();
}
neighbors[id2]=new int[nNeighbors];
weights[id2]=new EnergyType[nNeighbors];
LOGV(15)<<"allocated id2"<<std::endl;
}
#endif
neighbors[id1][neighbCount[id1]]=id2;
weights[id1][neighbCount[id1]]=1;
neighbCount[id1]++;
LOGV(15)<<"added id1->id2"<<std::endl;
neighbors[id2][neighbCount[id2]]=id1;
weights[id2][neighbCount[id2]]=1;
neighbCount[id2]++;
LOGV(15)<<"added id2->id1"<<std::endl;
}
};
template<class T> std::vector<std::vector<std::vector<std::map<int,float> > > > * GCO_SRSMRFSolver<T>::regPairwise = NULL;
template<class T> std::vector<std::vector<std::vector<std::vector<float> > > > * GCO_SRSMRFSolver<T>::segPairwise = NULL;
template<class T> std::vector<std::vector<std::vector<float > > > * GCO_SRSMRFSolver<T>::srsPairwise = NULL;
template<class T> typename GCO_SRSMRFSolver<T>::GraphModelPointerType GCO_SRSMRFSolver<T>::m_GraphModel=NULL;
template<class T> int GCO_SRSMRFSolver<T>::S0=0;
template<class T> int GCO_SRSMRFSolver<T>::S1=0;
template<class T> int GCO_SRSMRFSolver<T>::GLOBALnRegNodes=0;
template<class T> int GCO_SRSMRFSolver<T>::GLOBALnSegNodes=0;
template<class T> int GCO_SRSMRFSolver<T>::GLOBALnRegLabels=0;
template<class T> int GCO_SRSMRFSolver<T>::GLOBALnSegLabels=0;
template<class T> double GCO_SRSMRFSolver<T>::m_pairwiseSegmentationRegistrationWeight=0;
template<class T> double GCO_SRSMRFSolver<T>::m_pairwiseSegmentationWeight=0;
template<class T> double GCO_SRSMRFSolver<T>::m_pairwiseRegistrationWeight=0;
template<class T> bool GCO_SRSMRFSolver<T>::m_cachePotentials=false;
}
|
helpers.c | /*******************************************************************************
Collective Matrix Factorization
-------------------------------
This is a module for multi-way factorization of sparse and dense matrices
intended to be used for recommender system with explicit feedback data plus
side information about users and/or items.
The reference papers are:
(a) Cortes, David.
"Cold-start recommendations in Collective Matrix Factorization."
arXiv preprint arXiv:1809.00366 (2018).
(b) Singh, Ajit P., and Geoffrey J. Gordon.
"Relational learning via collective matrix factorization."
Proceedings of the 14th ACM SIGKDD international conference on
Knowledge discovery and data mining. 2008.
(c) Hu, Yifan, Yehuda Koren, and Chris Volinsky.
"Collaborative filtering for implicit feedback datasets."
2008 Eighth IEEE International Conference on Data Mining.
Ieee, 2008.
(d) Takacs, Gabor, Istvan Pilaszy, and Domonkos Tikk.
"Applications of the conjugate gradient method for
implicit feedback collaborative filtering."
Proceedings of the fifth ACM conference on
Recommender systems. 2011.
(e) Rendle, Steffen, Li Zhang, and Yehuda Koren.
"On the difficulty of evaluating baselines:
A study on recommender systems."
arXiv preprint arXiv:1905.01395 (2019).
(f) Franc, Vojtech, Vaclav Hlavac, and Mirko Navara.
"Sequential coordinate-wise algorithm for the
non-negative least squares problem."
International Conference on Computer Analysis of Images
and Patterns. Springer, Berlin, Heidelberg, 2005.
(g) Zhou, Yunhong, et al.
"Large-scale parallel collaborative filtering for
the netflix prize."
International conference on algorithmic applications in management.
Springer, Berlin, Heidelberg, 2008.
For information about the models offered here and how they are fit to
the data, see the files 'collective.c' and 'offsets.c'.
Written for C99 standard and OpenMP version 2.0 or higher, and aimed to be
used either as a stand-alone program, or wrapped into scripting languages
such as Python and R.
<https://www.github.com/david-cortes/cmfrec>
MIT License:
Copyright (c) 2020-2021 David Cortes
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*******************************************************************************/
#include "cmfrec.h"
/* Note: in x86_64 computers, there's hardly any speed up from having > 2
threads zeroing out an array */
void set_to_zero_(real_t *arr, size_t n, int nthreads)
{
if (n == 0) return;
#if defined(_OPENMP)
nthreads = (nthreads > 1)? 2 : 1;
size_t chunk_size = n / (size_t)nthreads;
size_t remainder = n % (size_t)nthreads;
int_t i = 0;
if (nthreads > 1 && n > (size_t)1e8)
{
#pragma omp parallel for schedule(static, 1) \
firstprivate(arr, chunk_size, nthreads) num_threads(nthreads)
for (i = 0; i < nthreads; i++)
memset(arr + i * chunk_size, 0, chunk_size*sizeof(real_t));
if (remainder > 0)
memset(arr + nthreads * chunk_size, 0, remainder*sizeof(real_t));
} else
#endif
{
memset(arr, 0, n*sizeof(real_t));
}
}
/* Note: in x86_64 computers, there's hardly any speed up from having > 4
threads copying arrays */
void copy_arr_(real_t *restrict src, real_t *restrict dest, size_t n, int nthreads)
{
/* Note: don't use BLAS scopy as it's actually much slower */
if (n == 0) return;
#if defined(_OPENMP)
if (nthreads > 1 && n > (size_t)1e8)
{
nthreads = cap_to_4(nthreads);
size_t chunk_size = n / (size_t)nthreads;
size_t remainder = n % (size_t)nthreads;
int_t i = 0;
#pragma omp parallel for schedule(static, 1) \
firstprivate(src, dest, chunk_size, nthreads) num_threads(nthreads)
for (i = 0; i < nthreads; i++)
memcpy(dest + i * chunk_size, src + i * chunk_size, chunk_size*sizeof(real_t));
if (remainder > 0)
memcpy(dest + nthreads*chunk_size, src + nthreads*chunk_size, remainder*sizeof(real_t));
} else
#endif
{
memcpy(dest, src, n*sizeof(real_t));
}
}
/* Note: the C99 standard only guarantees that isnan(NAN)!=0, and some compilers
   like mingw64 will NOT make isnan(NAN)==1. */
int_t count_NAs(real_t arr[], size_t n, int nthreads)
{
int_t cnt_NA = 0;
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, n) reduction(+:cnt_NA)
for (size_t_for ix = 0; ix < n; ix++)
cnt_NA += isnan(arr[ix]) != 0;
if (cnt_NA < 0) cnt_NA = INT_MAX; /* <- overflow */
return cnt_NA;
}
void count_NAs_by_row
(
real_t *restrict arr, int_t m, int_t n,
int_t *restrict cnt_NA, int nthreads,
bool *restrict full_dense, bool *restrict near_dense,
bool *restrict some_full
)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(m, n, arr, cnt_NA)
for (size_t_for row = 0; row < (size_t)m; row++)
{
int_t cnt = 0;
for (size_t col = 0; col < (size_t)n; col++)
cnt += isnan(arr[col + row*n]) != 0;
cnt_NA[row] = cnt;
}
*full_dense = true;
for (int_t ix = 0; ix < m; ix++) {
if (cnt_NA[ix]) {
*full_dense = false;
break;
}
}
    /* Will be considered near-dense if at least 75% of the rows have
       no missing values (e.g. for m=100 rows, at most 25 rows with NAs),
       matching the 0.75 threshold below.
       This is used later in order to decide whether to use a gradient-
       based approach or closed-form when optimizing a matrix in isolation */
*near_dense = false;
int_t cnt_rows_w_NA = 0;
if (!(*full_dense))
{
for (int_t ix = 0; ix < m; ix++)
cnt_rows_w_NA += (cnt_NA[ix] > 0);
if ((m - cnt_rows_w_NA) >= (int)(0.75 * (double)m))
*near_dense = true;
}
*some_full = *full_dense;
if (!(*full_dense))
{
for (int_t ix = 0; ix < m; ix++)
{
if (cnt_NA[ix] == 0) {
*some_full = true;
break;
}
}
}
}
void count_NAs_by_col
(
real_t *restrict arr, int_t m, int_t n,
int_t *restrict cnt_NA,
bool *restrict full_dense, bool *restrict near_dense,
bool *restrict some_full
)
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
cnt_NA[col] += isnan(arr[col + row*n]) != 0;
*full_dense = true;
for (int_t ix = 0; ix < n; ix++) {
if (cnt_NA[ix]) {
*full_dense = false;
break;
}
}
*near_dense = false;
int_t cnt_rows_w_NA = 0;
if (!(*full_dense))
{
for (int_t ix = 0; ix < n; ix++)
cnt_rows_w_NA += (cnt_NA[ix] > 0);
if ((n - cnt_rows_w_NA) >= (int_t)(0.75 * (real_t)n))
*near_dense = true;
}
*some_full = *full_dense;
if (!(*full_dense))
{
for (int_t ix = 0; ix < n; ix++)
{
if (cnt_NA[ix] == 0) {
*some_full = true;
break;
}
}
}
}
void sum_by_rows(real_t *restrict A, real_t *restrict outp, int_t m, int_t n, int nthreads)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(m, n, A, outp)
for (size_t_for row = 0; row < (size_t)m; row++)
{
double rsum = 0;
for (size_t col = 0; col < (size_t)n; col++)
rsum += A[col + row*(size_t)n];
outp[row] = rsum;
}
}
void sum_by_cols(real_t *restrict A, real_t *restrict outp, int_t m, int_t n, size_t lda, int nthreads)
{
#ifdef _OPENMP
    /* Note: GCC and Clang optimize this loop poorly when the array to sum has many
       rows and few columns, which is the most common use case here */
    if ((real_t)n > 1e3*(real_t)m && nthreads > 4) /* this branch assumes there are many columns, in which case parallelizing gives a speed-up */
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long col;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(A, outp, m, n, lda)
for (size_t_for col = 0; col < (size_t)n; col++)
{
double csum = 0;
for (size_t row = 0; row < (size_t)m; row++)
csum += A[col + row*lda];
outp[col] = csum;
}
}
else
#endif
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
outp[col] += A[col + row*lda];
}
}
void mat_plus_rowvec(real_t *restrict A, real_t *restrict b, int_t m, int_t n, int nthreads)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(A, b, m, n)
for (size_t_for row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
A[col + (size_t)row*n] += b[row];
}
void mat_plus_colvec(real_t *restrict A, real_t *restrict b, real_t alpha, int_t m, int_t n, size_t lda, int nthreads)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(m, n, lda, A, b)
for (size_t_for row = 0; row < (size_t)m; row++)
cblas_taxpy(n, alpha, b, 1, A + row*lda, 1);
}
void mat_minus_rowvec2
(
real_t *restrict Xfull,
int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
real_t *restrict b, int_t m, int_t n, int nthreads
)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row, ix;
#endif
if (Xfull != NULL)
{
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(Xfull, m, n, b)
for (size_t_for row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
Xfull[col + row*(size_t)n] -= b[row];
}
else
{
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(X, b, ixA, nnz)
for (size_t_for ix = 0; ix < nnz; ix++)
X[ix] -= b[ixA[ix]];
}
}
void mat_minus_colvec2
(
real_t *restrict Xfull,
int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
real_t *restrict b, int_t m, int_t n, int nthreads
)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
if (Xfull != NULL)
{
for (size_t row = 0; row < (size_t)m; row++)
for (size_t col = 0; col < (size_t)n; col++)
Xfull[col + row*(size_t)n] -= b[col];
}
else
{
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(X, b, ixA, nnz)
for (size_t_for ix = 0; ix < nnz; ix++)
X[ix] -= b[ixB[ix]];
}
}
void nan_to_zero(real_t *restrict arr, real_t *restrict comp, size_t n, int nthreads)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, comp, n)
for (size_t_for ix = 0; ix < n; ix++)
arr[ix] = (!isnan(comp[ix]))? arr[ix] : 0;
}
void mult_if_non_nan(real_t *restrict arr, real_t *restrict comp, real_t *restrict w, size_t n, int nthreads)
{
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, w, n)
for (size_t_for ix = 0; ix < n; ix++)
arr[ix] = (!isnan(arr[ix]))? (w[ix] * arr[ix]) : (0);
}
void mult_elemwise(real_t *restrict inout, real_t *restrict other, size_t n, int nthreads)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(inout, other, n)
for (size_t_for ix = 0; ix < n; ix++)
inout[ix] *= other[ix];
}
real_t sum_squares(real_t *restrict arr, size_t n, int nthreads)
{
double res = 0;
if (n < (size_t)INT_MAX)
return cblas_tdot((int)n, arr, 1, arr, 1);
else {
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, n) reduction(+:res)
for (size_t_for ix = 0; ix < n; ix++)
res += square(arr[ix]);
}
return (real_t)res;
}
void taxpy_large(real_t *restrict A, real_t x, real_t *restrict Y, size_t n, int nthreads)
{
if (n < (size_t)INT_MAX)
cblas_taxpy((int)n, x, A, 1, Y, 1);
else {
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
if (x == 1.)
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(A, Y, n)
for (size_t_for ix = 0; ix < n; ix++)
Y[ix] += A[ix];
else
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(A, x, Y, n)
for (size_t_for ix = 0; ix < n; ix++)
Y[ix] = fma_t(x, A[ix], Y[ix]);
}
}
void tscal_large(real_t *restrict arr, real_t alpha, size_t n, int nthreads)
{
if (alpha == 1.)
return;
if (n < (size_t)INT_MAX)
cblas_tscal((int)n, alpha, arr, 1);
else {
nthreads = cap_to_4(nthreads);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, alpha, n)
for (size_t_for ix = 0; ix < n; ix++)
arr[ix] *= alpha;
}
}
/* Xoshiro256++ and Xoshiro128++
https://prng.di.unimi.it */
static inline uint64_t splitmix64(const uint64_t seed)
{
uint64_t z = (seed + 0x9e3779b97f4a7c15);
z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
return z ^ (z >> 31);
}
#ifndef USE_XOSHIRO128
static inline uint64_t rotl64(const uint64_t x, const int k) {
return (x << k) | (x >> (64 - k));
}
static inline uint64_t xoshiro256pp(uint64_t state[4])
{
const uint64_t result = rotl64(state[0] + state[3], 23) + state[0];
const uint64_t t = state[1] << 17;
state[2] ^= state[0];
state[3] ^= state[1];
state[1] ^= state[2];
state[0] ^= state[3];
state[2] ^= t;
state[3] = rotl64(state[3], 45);
return result;
}
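/* The jump function below is equivalent to 2^128 calls to xoshiro256pp();
   calling it repeatedly on copies of one seeded state therefore yields
   non-overlapping subsequences, which is what rnorm_parallel() relies on to
   give each bucket/thread its own independent stream. */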
static inline void xoshiro256pp_jump(uint64_t state[4])
{
const uint64_t JUMP[] = { 0x180ec6d33cfd0aba, 0xd5a61266f0c9392c,
0xa9582618e03fc9aa, 0x39abdc4529b1661c };
uint64_t s0 = 0;
uint64_t s1 = 0;
uint64_t s2 = 0;
uint64_t s3 = 0;
for (int i = 0; i < (int)(sizeof (JUMP) / sizeof (*JUMP)); i++)
{
for (int b = 0; b < 64; b++)
{
if (JUMP[i] & UINT64_C(1) << b)
{
s0 ^= state[0];
s1 ^= state[1];
s2 ^= state[2];
s3 ^= state[3];
}
xoshiro256pp(state);
}
}
state[0] = s0;
state[1] = s1;
state[2] = s2;
state[3] = s3;
}
#else
static inline uint32_t rotl32(const uint32_t x, const int k) {
return (x << k) | (x >> (32 - k));
}
static inline uint32_t xoshiro128pp(uint32_t state[4])
{
const uint32_t result = rotl32(state[0] + state[3], 7) + state[0];
const uint32_t t = state[1] << 9;
state[2] ^= state[0];
state[3] ^= state[1];
state[1] ^= state[2];
state[0] ^= state[3];
state[2] ^= t;
state[3] = rotl32(state[3], 11);
return result;
}
static inline void xoshiro128pp_jump(uint32_t state[4])
{
const uint32_t JUMP[] = { 0x8764000b, 0xf542d2d3,
0x6fa035c3, 0x77f2db5b };
uint32_t s0 = 0;
uint32_t s1 = 0;
uint32_t s2 = 0;
uint32_t s3 = 0;
for(int i = 0; i < (int)(sizeof (JUMP) / sizeof (*JUMP)); i++)
{
for(int b = 0; b < 32; b++)
{
if (JUMP[i] & UINT32_C(1) << b)
{
s0 ^= state[0];
s1 ^= state[1];
s2 ^= state[2];
s3 ^= state[3];
}
xoshiro128pp(state);
}
}
state[0] = s0;
state[1] = s1;
state[2] = s2;
state[3] = s3;
}
#endif
/* Note: for double precision, this uses the Box-Muller transform in its basic
   (trigonometric) form, which is less efficient than the polar form.
   Nevertheless, in some experiments it seemed to give slightly better end
   results with double precision, even though it is slower and loses more
   numeric precision by mapping the draws to [0, 1] instead of [-1, 1].
   For single precision, the polar form tended to give better results.
   Note: when generating a uniform random number in (0, 1), dividing a raw
   integer draw by the maximum possible value does not yield a uniform
   distribution, because the upper part of that range is not evenly
   representable in floating point. Instead, one should take at most 2^53
   distinct values, which is the range that doubles represent evenly. */
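/* Minimal sketch (not used by the functions below) of the evenly-spaced
   mapping described above: keep only the low 53 bits of a 64-bit draw and
   scale by 2^-53, so every result is a multiple of 2^-53 and the distribution
   over [0, 1) stays uniform. Assumes <stdint.h> and <math.h> come in through
   the project headers (they are already used in this file). */
static inline double uniform_from_bits_sketch(uint64_t draw)
{
    return ldexp((double)(draw & ((UINT64_C(1) << 53) - UINT64_C(1))), -53); /* in [0, 1) */
}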
#if defined(USE_DOUBLE) || !(defined(USE_FLOAT) && defined(USE_XOSHIRO128))
void rnorm_xoshiro(real_t *seq, const size_t n, rng_state_t state[4])
{
#ifndef USE_XOSHIRO128
const uint64_t two53_i = (UINT64_C(1) << 53) - UINT64_C(1);
#endif
const double twoPI = 2. * M_PI;
uint64_t rnd1, rnd2;
#ifdef USE_XOSHIRO128
uint32_t rnd11, rnd12, rnd21, rnd22;
const uint32_t two21_i = (UINT32_C(1) << 21) - UINT32_C(1);
const uint32_t ONE = 1;
const bool is_little_endian = *((unsigned char*)&ONE) != 0;
#endif
double u, v;
size_t n_ = n / (size_t)2;
for (size_t ix = 0; ix < n_; ix++)
{
do
{
#ifdef USE_XOSHIRO128
rnd11 = xoshiro128pp(state);
rnd12 = xoshiro128pp(state);
rnd21 = xoshiro128pp(state);
rnd22 = xoshiro128pp(state);
#else
rnd1 = xoshiro256pp(state);
rnd2 = xoshiro256pp(state);
#endif
#if defined(DBL_MANT_DIG) && (DBL_MANT_DIG == 53) &&(FLT_RADIX == 2)
#ifdef USE_XOSHIRO128
if (is_little_endian) {
rnd12 = rnd12 & two21_i;
rnd22 = rnd22 & two21_i;
} else {
rnd11 = rnd11 & two21_i;
rnd21 = rnd21 & two21_i;
}
memcpy((char*)&rnd1, &rnd11, sizeof(uint32_t));
memcpy((char*)&rnd1 + sizeof(uint32_t), &rnd12, sizeof(uint32_t));
memcpy((char*)&rnd2, &rnd21, sizeof(uint32_t));
memcpy((char*)&rnd2 + sizeof(uint32_t), &rnd22, sizeof(uint32_t));
u = ldexp((double)rnd1, -53);
v = ldexp((double)rnd2, -53);
#else
u = ldexp((double)(rnd1 & two53_i), -53);
v = ldexp((double)(rnd2 & two53_i), -53);
#endif
#else
u = (double)rnd1 / (double)UINT64_MAX;
v = (double)rnd2 / (double)UINT64_MAX;
#endif
}
while (u == 0 || v == 0);
u = sqrt(-2. * log(u));
seq[(size_t)2*ix] = (real_t)ldexp(cos(twoPI * v) * u, -7);
seq[(size_t)2*ix + (size_t)1] = (real_t)ldexp(sin(twoPI * v) * u, -7);
}
if ((n % (size_t)2) != 0)
{
do
{
#ifdef USE_XOSHIRO128
rnd11 = xoshiro128pp(state);
rnd12 = xoshiro128pp(state);
rnd21 = xoshiro128pp(state);
rnd22 = xoshiro128pp(state);
#else
rnd1 = xoshiro256pp(state);
rnd2 = xoshiro256pp(state);
#endif
#if defined(DBL_MANT_DIG) && (DBL_MANT_DIG == 53) &&(FLT_RADIX == 2)
#ifdef USE_XOSHIRO128
if (is_little_endian) {
rnd12 = rnd12 & two21_i;
rnd22 = rnd22 & two21_i;
} else {
rnd11 = rnd11 & two21_i;
rnd21 = rnd21 & two21_i;
}
memcpy((char*)&rnd1, &rnd11, sizeof(uint32_t));
memcpy((char*)&rnd1 + sizeof(uint32_t), &rnd12, sizeof(uint32_t));
memcpy((char*)&rnd2, &rnd21, sizeof(uint32_t));
memcpy((char*)&rnd2 + sizeof(uint32_t), &rnd22, sizeof(uint32_t));
u = ldexp((double)rnd1, -53);
v = ldexp((double)rnd2, -53);
#else
u = ldexp((double)(rnd1 & two53_i), -53);
v = ldexp((double)(rnd2 & two53_i), -53);
#endif
#else
u = (double)rnd1 / (double)UINT64_MAX;
v = (double)rnd2 / (double)UINT64_MAX;
#endif
}
while (u == 0 || v == 0);
u = sqrt(-2. * log(u));
seq[n - (size_t)1] = (real_t)ldexp(cos(twoPI * v) * u, -7);
}
}
#else
void rnorm_xoshiro(float *seq, const size_t n, rng_state_t state[4])
{
const uint32_t two25_i = (UINT32_C(1) << 25) - UINT32_C(1);
const int32_t two24_i = (UINT32_C(1) << 24);
uint32_t rnd1, rnd2;
#ifndef USE_XOSHIRO128
uint64_t rnd0;
#endif
float u, v, s;
size_t n_ = n / (size_t)2;
for (size_t ix = 0; ix < n_; ix++)
{
do
{
#ifdef USE_XOSHIRO128
rnd1 = xoshiro128pp(state);
rnd2 = xoshiro128pp(state);
#else
rnd0 = xoshiro256pp(state);
memcpy(&rnd1, (char*)&rnd0, sizeof(uint32_t));
memcpy(&rnd2, (char*)&rnd0 + sizeof(uint32_t), sizeof(uint32_t));
#endif
#if defined(FLT_MANT_DIG) && (FLT_MANT_DIG == 24) &&(FLT_RADIX == 2)
u = ldexpf((float)((int32_t)(rnd1 & two25_i) - two24_i), -24);
v = ldexpf((float)((int32_t)(rnd2 & two25_i) - two24_i), -24);
#else
u = (float)rnd1 / (float)INT32_MAX;
v = (float)rnd2 / (float)INT32_MAX;
#endif
s = square(u) + square(v);
}
while (s == 0 || s >= 1);
s = sqrtf((-2.0f / s) * logf(s));
seq[(size_t)2*ix] = ldexpf(u * s, -7);
seq[(size_t)2*ix + (size_t)1] = ldexpf(v * s, -7);
}
if ((n % (size_t)2) != 0)
{
do
{
#ifdef USE_XOSHIRO128
rnd1 = xoshiro128pp(state);
rnd2 = xoshiro128pp(state);
#else
rnd0 = xoshiro256pp(state);
memcpy(&rnd1, (char*)&rnd0, sizeof(uint32_t));
memcpy(&rnd2, (char*)&rnd0 + sizeof(uint32_t), sizeof(uint32_t));
#endif
#if defined(FLT_MANT_DIG) && (FLT_MANT_DIG == 24) &&(FLT_RADIX == 2)
u = ldexpf((float)((int32_t)(rnd1 & two25_i) - two24_i), -24);
v = ldexpf((float)((int32_t)(rnd2 & two25_i) - two24_i), -24);
#else
u = (float)rnd1 / (float)INT32_MAX;
v = (float)rnd2 / (float)INT32_MAX;
#endif
s = square(u) + square(v);
}
while (s == 0 || s >= 1);
s = sqrtf((-2.0f / s) * logf(s));
seq[n - (size_t)1] = ldexpf(u * s, -7);
}
}
#endif
void seed_state(int_t seed, rng_state_t state[4])
{
#ifdef USE_XOSHIRO128
uint64_t s1 = splitmix64(seed);
uint64_t s2 = splitmix64(s1);
memcpy(state, &s1, sizeof(uint64_t));
memcpy(&state[2], &s2, sizeof(uint64_t));
#else
state[0] = splitmix64(seed);
state[1] = splitmix64(state[0]);
state[2] = splitmix64(state[1]);
state[3] = splitmix64(state[2]);
#endif
}
void fill_rnorm_buckets
(
const size_t n_buckets, real_t *arr, const size_t n,
real_t **ptr_bucket, size_t *sz_bucket, const size_t BUCKET_SIZE
)
{
if (n_buckets == 0 || n == 0) return;
for (size_t bucket = 0; bucket < n_buckets; bucket++)
{
ptr_bucket[bucket] = arr;
arr += BUCKET_SIZE;
}
sz_bucket[n_buckets-(size_t)1] = n - BUCKET_SIZE*(n_buckets-(size_t)1);
}
void rnorm_singlethread(ArraysToFill arrays, rng_state_t state[4])
{
if (arrays.sizeA)
rnorm_xoshiro(arrays.A, arrays.sizeA, state);
if (arrays.sizeB)
rnorm_xoshiro(arrays.B, arrays.sizeB, state);
}
/* This function generates random normal numbers in parallel by dividing the
   arrays to fill into buckets of up to 250k entries each. It uses the jump
   technique of the xoshiro family to ensure that the sequences generated for
   different buckets do not overlap. */
int_t rnorm_parallel(ArraysToFill arrays, int_t seed, int nthreads)
{
#ifdef USE_R_RNG
GetRNGstate();
for (size_t ix = 0; ix < arrays.sizeA; ix++)
arrays.A[ix] = norm_rand();
for (size_t ix = 0; ix < arrays.sizeB; ix++)
arrays.B[ix] = norm_rand();
PutRNGstate();
return 0;
#endif
const size_t BUCKET_SIZE = (size_t)250000;
rng_state_t initial_state[4];
seed_state(seed, initial_state);
if (arrays.sizeA + arrays.sizeB < BUCKET_SIZE)
{
rnorm_singlethread(arrays, initial_state);
return 0;
}
    /* ceiling division: note the parentheses -- without them '+' binds tighter
       than '!=' and the whole expression collapses to 0 or 1 */
    const size_t buckA = arrays.sizeA / BUCKET_SIZE + ((arrays.sizeA % BUCKET_SIZE) != 0);
    const size_t buckB = arrays.sizeB / BUCKET_SIZE + ((arrays.sizeB % BUCKET_SIZE) != 0);
const size_t tot_buckets = buckA + buckB;
real_t **ptr_bucket = (real_t**)malloc(tot_buckets*sizeof(real_t*));
size_t *sz_bucket = (size_t*)malloc(tot_buckets*sizeof(size_t));
rng_state_t *states = (rng_state_t*)malloc((size_t)4*tot_buckets*sizeof(rng_state_t));
if (ptr_bucket == NULL || sz_bucket == NULL || states == NULL)
{
free(ptr_bucket);
free(sz_bucket);
free(states);
return 1;
}
for (size_t ix = 0; ix < tot_buckets; ix++)
sz_bucket[ix] = BUCKET_SIZE;
memcpy(states, initial_state, 4*sizeof(rng_state_t));
for (size_t ix = 1; ix < tot_buckets; ix++)
{
memcpy(states + (size_t)4*ix, states + (size_t)4*(ix-(size_t)1), 4*sizeof(rng_state_t));
#ifdef USE_XOSHIRO128
xoshiro128pp_jump(states + 4*ix);
#else
xoshiro256pp_jump(states + 4*ix);
#endif
}
real_t ** const ptr_bucket_ = ptr_bucket;
size_t * const sz_bucket_ = sz_bucket;
fill_rnorm_buckets(
buckA, arrays.A, arrays.sizeA,
ptr_bucket, sz_bucket, BUCKET_SIZE
);
ptr_bucket += buckA; sz_bucket += buckA;
fill_rnorm_buckets(
buckB, arrays.B, arrays.sizeB,
ptr_bucket, sz_bucket, BUCKET_SIZE
);
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(states)
for (size_t_for ix = 0; ix < tot_buckets; ix++)
{
rng_state_t state[] = {states[(size_t)4*ix],
states[(size_t)4*ix + (size_t)1],
states[(size_t)4*ix + (size_t)2],
states[(size_t)4*ix + (size_t)3]};
rnorm_xoshiro(ptr_bucket_[ix], sz_bucket_[ix], state);
}
free(ptr_bucket_);
free(sz_bucket_);
free(states);
return 0;
}
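/* Illustrative usage sketch (not part of the original library): fill a factor
   matrix with small random normal values. The fields A/sizeA/B/sizeB are the
   ones read above; the helper name and the k/m parameters are hypothetical. */
static inline int_t fill_factors_with_noise_sketch(real_t *A, size_t k, size_t m,
                                                   int_t seed, int nthreads)
{
    ArraysToFill arrays;
    memset(&arrays, 0, sizeof(arrays));   /* leave the unused slot (B) empty */
    arrays.A = A;
    arrays.sizeA = k * m;
    return rnorm_parallel(arrays, seed, nthreads);  /* 0 on success, 1 on OOM */
}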
void reduce_mat_sum(real_t *restrict outp, size_t lda, real_t *restrict inp,
int_t m, int_t n, int nthreads)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
    size_t m_by_n = (size_t)m * (size_t)n;
if (n > 1 || lda > 0)
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(outp, inp, m, n, nthreads)
for (size_t_for row = 0; row < (size_t)m; row++)
for (size_t tid = 0; tid < (size_t)nthreads; tid++)
for (size_t col = 0; col < (size_t)n; col++)
outp[col + row*lda] += inp[tid*m_by_n + col + row*n];
else
for (size_t tid = 0; tid < (size_t)nthreads; tid++)
for (size_t row = 0; row < (size_t)m; row++)
outp[row] += inp[tid*m_by_n + row];
}
void exp_neg_x(real_t *restrict arr, size_t n, int nthreads)
{
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, n)
for (size_t_for ix = 0; ix < n; ix++)
arr[ix] = exp_t(-arr[ix]);
}
void add_to_diag(real_t *restrict A, real_t val, size_t n)
{
for (size_t ix = 0; ix < n; ix++)
A[ix + ix*n] += val;
}
real_t sum_sq_div_w(real_t *restrict arr, real_t *restrict w, size_t n, bool compensated, int nthreads)
{
real_t res = 0;
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long ix;
#endif
#pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, w, n) reduction(+:res)
for (size_t_for ix = 0; ix < n; ix++)
res += square(arr[ix]) / w[ix];
return res;
}
/* X <- alpha*A*B + X | A(m,k) is sparse CSR, B(k,n) is dense */
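/* Each stored element of a sparse row of A contributes alpha*values[col] times
   row indices[col] of the dense matrix B to the corresponding output row, i.e.
   one axpy per non-zero; rows are independent, hence the parallel-for over rows. */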
void tgemm_sp_dense
(
int_t m, int_t n, real_t alpha,
size_t indptr[], int_t indices[], real_t values[],
real_t DenseMat[], size_t ldb,
real_t OutputMat[], size_t ldc,
int nthreads
)
{
if (m <= 0 || indptr[0] == indptr[m])
return;
real_t *row_ptr;
#if defined(_OPENMP) && \
( (_OPENMP < 200801) /* OpenMP < 3.0 */ \
|| defined(_WIN32) || defined(_WIN64) \
)
long long row;
#endif
if (alpha != 1.)
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(m, n, alpha, ldb, ldc, OutputMat, DenseMat, indptr, indices, values) \
private(row_ptr)
for (size_t_for row = 0; row < (size_t)m; row++) {
row_ptr = OutputMat + row*ldc;
for (size_t col = indptr[row]; col < indptr[row+1]; col++)
cblas_taxpy(n, alpha*values[col], DenseMat + (size_t)indices[col]*ldb, 1, row_ptr, 1);
}
else
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(m, n, ldb, ldc, OutputMat, DenseMat, indptr, indices, values) \
private(row_ptr)
for (size_t_for row = 0; row < (size_t)m; row++) {
row_ptr = OutputMat + row*ldc;
for (size_t col = indptr[row]; col < indptr[row+1]; col++)
cblas_taxpy(n, values[col], DenseMat + (size_t)indices[col]*ldb, 1, row_ptr, 1);
}
}
/* x <- alpha*t(A)*v + x | A[m,n] is dense, v[m] is sparse, x[n] is dense */
void tgemv_dense_sp
(
int_t m, int_t n,
real_t alpha, real_t DenseMat[], size_t lda,
int_t ixB[], real_t vec_sp[], size_t nnz,
real_t OutputVec[]
)
{
if (alpha != 1.)
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(n, alpha*vec_sp[ix], DenseMat + (size_t)ixB[ix]*lda, 1, OutputVec, 1);
else
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(n, vec_sp[ix], DenseMat + (size_t)ixB[ix]*lda, 1, OutputVec, 1);
}
/* Same but with an array of weights */
void tgemv_dense_sp_weighted
(
int_t m, int_t n,
real_t alpha[], real_t DenseMat[], size_t lda,
int_t ixB[], real_t vec_sp[], size_t nnz,
real_t OutputVec[]
)
{
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(n, alpha[ix]*vec_sp[ix], DenseMat + (size_t)ixB[ix]*lda, 1, OutputVec, 1);
}
/* Same, but with both array of weights and scalar weight */
void tgemv_dense_sp_weighted2
(
int_t m, int_t n,
real_t alpha[], real_t alpha2, real_t DenseMat[], size_t lda,
int_t ixB[], real_t vec_sp[], size_t nnz,
real_t OutputVec[]
)
{
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(n, alpha2*alpha[ix]*vec_sp[ix], DenseMat + (size_t)ixB[ix]*lda, 1, OutputVec, 1);
}
void tgemv_dense_sp_notrans
(
int_t m, int_t n,
real_t DenseMat[], int_t lda,
int_t ixB[], real_t vec_sp[], size_t nnz,
real_t OutputVec[]
)
{
for (size_t ix = 0; ix < nnz; ix++)
cblas_taxpy(m, vec_sp[ix],
DenseMat + ixB[ix], lda,
OutputVec, 1);
}
/* B[:m,:n] := A[:m,:n] */
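/* (When the leading dimensions differ from n, LAPACK's ?lacpy is called with an
   'uplo' code that is neither 'U' nor 'L', which makes it copy the full
   rectangular block rather than only a triangle.) */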
void copy_mat
(
int_t m, int_t n,
real_t *restrict A, int_t lda,
real_t *restrict B, int_t ldb
)
{
char uplo = '?';
if (m == 0 && n == 0) return;
if (ldb == n && lda == n)
memcpy(B, A, (size_t)m*(size_t)n*sizeof(real_t));
else
tlacpy_(&uplo, &n, &m, A, &lda, B, &ldb);
}
/* B[:m,:n] = A[:m,:n] + B[:m,:n] */
void sum_mat
(
size_t m, size_t n,
real_t *restrict A, size_t lda,
real_t *restrict B, size_t ldb
)
{
/* Note1: do NOT change this to axpy, it gets a huge slow-down when
used with MKL for some reason. OpenBLAS still works fine though */
/* Note2: in most cases it is expected that m >> n */
for (size_t row = 0; row < m; row++)
for (size_t col = 0; col < n; col++)
B[col + row*ldb] += A[col + row*lda];
}
void transpose_mat(real_t *restrict A, size_t m, size_t n, real_t *restrict buffer_real_t)
{
memcpy(buffer_real_t, A, m*n*sizeof(real_t));
for (size_t row = 0; row < m; row++)
for (size_t col = 0; col < n; col++)
A[row + col*m] = buffer_real_t[col + row*n];
}
void transpose_mat2(real_t *restrict A, size_t m, size_t n, real_t *restrict outp)
{
for (size_t row = 0; row < m; row++)
for (size_t col = 0; col < n; col++)
outp[row + col*m] = A[col + row*n];
}
void transpose_mat3
(
real_t *restrict A, size_t lda,
size_t m, size_t n,
real_t *restrict outp, size_t ldb
)
{
for (size_t row = 0; row < m; row++)
for (size_t col = 0; col < n; col++)
outp[row + col*ldb] = A[col + row*lda];
}
int_t coo_to_csr_plus_alloc
(
int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval,
real_t *restrict W,
int_t m, int_t n, size_t nnz,
size_t *restrict *csr_p, int_t *restrict *csr_i, real_t *restrict *csr_v,
real_t *restrict *csr_w
)
{
*csr_p = (size_t*)malloc(((size_t)m+(size_t)1)*sizeof(size_t));
*csr_i = (int_t*)malloc(nnz*sizeof(int_t));
*csr_v = (real_t*)malloc(nnz*sizeof(real_t));
if (*csr_p == NULL || *csr_i == NULL || *csr_v == NULL)
return 1;
if (W != NULL) {
*csr_w = (real_t*)malloc(nnz*sizeof(real_t));
if (*csr_w == NULL) return 1;
}
coo_to_csr(
Xrow, Xcol, Xval,
W,
m, n, nnz,
*csr_p, *csr_i, *csr_v,
(W == NULL)? ((real_t*)NULL) : (*csr_w)
);
return 0;
}
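/* COO -> CSR conversion via counting sort: a first pass counts the entries in
   each row and turns the counts into cumulative offsets csr_p; a second pass
   scatters column indices/values into their row segments. For example, row
   indices {0, 2, 0, 1} give per-row counts {2, 1, 1} and offsets {0, 2, 3, 4},
   so the two entries of row 0 land in positions 0-1, row 1 in position 2, and
   row 2 in position 3. If the per-row counter buffer cannot be allocated, a
   fallback fills each segment back-to-front by decrementing csr_p in place and
   then rebuilds csr_p with one extra counting pass. */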
void coo_to_csr
(
int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval,
real_t *restrict W,
int_t m, int_t n, size_t nnz,
size_t *restrict csr_p, int_t *restrict csr_i, real_t *restrict csr_v,
real_t *restrict csr_w
)
{
bool has_mem = true;
int_t *cnt_byrow = NULL;
produce_p:
{
memset(csr_p, 0, ((size_t)m+(size_t)1)*sizeof(size_t));
for (size_t ix = 0; ix < nnz; ix++)
csr_p[Xrow[ix]+(size_t)1]++;
for (int_t row = 0; row < m; row++)
csr_p[row+(size_t)1] += csr_p[row];
}
if (!has_mem) goto cleanup;
cnt_byrow = (int_t*)calloc(m, sizeof(int_t));
if (cnt_byrow != NULL)
{
if (W == NULL)
for (size_t ix = 0; ix < nnz; ix++) {
csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
}
else
for (size_t ix = 0; ix < nnz; ix++) {
csr_w[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = W[ix];
csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
}
goto cleanup;
}
else
{
if (W == NULL)
for (size_t ix = 0; ix < nnz; ix++) {
csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
}
else
for (size_t ix = 0; ix < nnz; ix++) {
csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
csr_w[csr_p[Xrow[ix]+(size_t)1]] = W[ix];
}
has_mem = false;
goto produce_p;
}
cleanup:
free(cnt_byrow);
}
void coo_to_csr_and_csc
(
int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval,
real_t *restrict W, int_t m, int_t n, size_t nnz,
size_t *restrict csr_p, int_t *restrict csr_i, real_t *restrict csr_v,
size_t *restrict csc_p, int_t *restrict csc_i, real_t *restrict csc_v,
real_t *restrict csr_w, real_t *restrict csc_w,
int nthreads
)
{
bool has_mem = true;
nthreads = (nthreads > 2)? 2 : 1;
int_t *cnt_byrow = NULL;
int_t *cnt_bycol = NULL;
produce_p:
{
memset(csr_p, 0, ((size_t)m+(size_t)1)*sizeof(size_t));
memset(csc_p, 0, ((size_t)n+(size_t)1)*sizeof(size_t));
for (size_t ix = 0; ix < nnz; ix++) {
csr_p[Xrow[ix]+(size_t)1]++;
csc_p[Xcol[ix]+(size_t)1]++;
}
for (int_t row = 0; row < m; row++)
csr_p[row+(size_t)1] += csr_p[row];
for (int_t col = 0; col < n; col++)
csc_p[col+(size_t)1] += csc_p[col];
}
if (!has_mem) goto cleanup;
cnt_byrow = (int_t*)calloc(m, sizeof(int_t));
cnt_bycol = (int_t*)calloc(n, sizeof(int_t));
#if defined(_OPENMP) && (_OPENMP > 201305) /* OpenMP >= 4.0 */
omp_set_max_active_levels(2);
#endif
if (cnt_byrow != NULL && cnt_bycol != NULL) {
#pragma omp parallel sections num_threads(nthreads)
{
#pragma omp section
{
if (W == NULL)
for (size_t ix = 0; ix < nnz; ix++) {
csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
}
else
for (size_t ix = 0; ix < nnz; ix++) {
csr_w[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = W[ix];
csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
}
}
#pragma omp section
{
if (W == NULL)
for (size_t ix = 0; ix < nnz; ix++) {
csc_v[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]] = Xval[ix];
csc_i[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]++] = Xrow[ix];
}
else
for (size_t ix = 0; ix < nnz; ix++) {
csc_w[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]] = W[ix];
csc_v[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]] = Xval[ix];
csc_i[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]++] = Xrow[ix];
}
}
}
goto cleanup;
}
else {
#pragma omp parallel sections num_threads(nthreads)
{
#pragma omp section
{
if (W == NULL)
for (size_t ix = 0; ix < nnz; ix++) {
csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
}
else
for (size_t ix = 0; ix < nnz; ix++) {
csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
csr_w[csr_p[Xrow[ix]+(size_t)1]] = W[ix];
}
}
#pragma omp section
{
if (W == NULL)
for (size_t ix = 0; ix < nnz; ix++) {
csc_i[--csc_p[Xcol[ix]+(size_t)1]] = Xrow[ix];
csc_v[csc_p[Xcol[ix]+(size_t)1]] = Xval[ix];
}
else
for (size_t ix = 0; ix < nnz; ix++) {
csc_i[--csc_p[Xcol[ix]+(size_t)1]] = Xrow[ix];
csc_v[csc_p[Xcol[ix]+(size_t)1]] = Xval[ix];
csc_w[csc_p[Xcol[ix]+(size_t)1]] = W[ix];
}
}
}
has_mem = false;
goto produce_p;
}
cleanup:
free(cnt_byrow);
free(cnt_bycol);
}
void row_means_csr(size_t indptr[], real_t *restrict values,
real_t *restrict output, int_t m, int nthreads)
{
int_t row = 0;
    set_to_zero(output, m); /* zero the output means, not the CSR values */
#pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
shared(indptr, values, output, m)
for (row = 0; row < m; row++)
{
double rsum = 0;
for (size_t ix = indptr[row]; ix < indptr[row+(size_t)1]; ix++)
rsum += values[ix];
output[row] = rsum;
}
nthreads = cap_to_4(nthreads);
#pragma omp parallel for schedule(static) num_threads(nthreads) \
shared(indptr, output, m)
for (row = 0; row < m; row++)
output[row] /= (real_t)(indptr[row+(size_t)1] - indptr[row]);
}
bool should_stop_procedure = false;
bool handle_is_locked = false;
void set_interrup_global_variable(int_t s)
{
#pragma omp critical
{
should_stop_procedure = true;
}
}
int_t lbfgs_printer_collective
(
void *instance,
const real_t *x,
const real_t *g,
const real_t fx,
const real_t xnorm,
const real_t gnorm,
const real_t step,
size_t n,
int_t k,
int_t ls
)
{
((data_collective_fun_grad*)instance)->niter = k;
int_t print_every = ((data_collective_fun_grad*)instance)->print_every;
    if (print_every > 0 && (k % print_every) == 0) {
printf("Iteration %-4d - f(x)= %-8.03g - ||g(x)||= %-8.03g - ls=% 2d\n",
k, fx, gnorm, ls);
fflush(stdout);
}
if (should_stop_procedure)
return 1;
return 0;
}
int_t lbfgs_printer_offsets
(
void *instance,
const real_t *x,
const real_t *g,
const real_t fx,
const real_t xnorm,
const real_t gnorm,
const real_t step,
size_t n,
int_t k,
int_t ls
)
{
((data_offsets_fun_grad*)instance)->niter = k;
int_t print_every = ((data_offsets_fun_grad*)instance)->print_every;
    if (print_every > 0 && (k % print_every) == 0) {
printf("Iteration %-5d - f(x)= %-8.03g - ||g(x)||= %-8.03g - ls=% 2d\n",
k, fx, gnorm, ls);
fflush(stdout);
}
if (should_stop_procedure)
return 1;
return 0;
}
bool check_is_sorted(int_t arr[], int_t n)
{
if (n <= 1) return true;
for (int_t ix = 0; ix < n-1; ix++)
if (arr[ix] > arr[ix+1]) return false;
return true;
}
/* https://www.stat.cmu.edu/~ryantibs/median/quickselect.c */
/* Some sample C code for the quickselect algorithm,
taken from Numerical Recipes in C. */
#define SWAP(a,b) temp=(a);(a)=(b);(b)=temp;
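/* Partially sorts the index array arr[] in place around position k, using
   values[] as the key: indices of the larger values end up before position k.
   Only this partition is guaranteed, not a full ordering of arr[]. */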
void qs_argpartition(int_t arr[], real_t values[], int_t n, int_t k)
{
int_t i,ir,j,l,mid;
int_t a,temp;
l=0;
ir=n-1;
for(;;) {
if (ir <= l+1) {
if (ir == l+1 && values[arr[ir]] > values[arr[l]]) {
SWAP(arr[l],arr[ir]);
}
return;
}
else {
mid=(l+ir) >> 1;
SWAP(arr[mid],arr[l+1]);
if (values[arr[l]] < values[arr[ir]]) {
SWAP(arr[l],arr[ir]);
}
if (values[arr[l+1]] < values[arr[ir]]) {
SWAP(arr[l+1],arr[ir]);
}
if (values[arr[l]] < values[arr[l+1]]) {
SWAP(arr[l],arr[l+1]);
}
i=l+1;
j=ir;
a=arr[l+1];
for (;;) {
do i++; while (values[arr[i]] > values[a]);
do j--; while (values[arr[j]] < values[a]);
if (j < i) break;
SWAP(arr[i],arr[j]);
}
arr[l+1]=arr[j];
arr[j]=a;
if (j >= k) ir=j-1;
if (j <= k) l=i;
}
}
}
void append_ones_last_col
(
real_t *restrict orig, size_t m, size_t n,
real_t *restrict outp
)
{
copy_mat(m, n,
orig, n,
outp, n+1);
for (size_t ix = 0; ix < m; ix++)
outp[n + ix*(n+(size_t)1)] = 1.;
}
void fill_lower_triangle(real_t A[], size_t n, size_t lda)
{
for (size_t row = 1; row < n; row++)
for (size_t col = 0; col < row; col++)
A[col + row*lda] = A[row + col*lda];
}
void print_err_msg(const char *msg)
{
#ifndef _FOR_R
fprintf(stderr, "%s", msg);
#else
fprintf(stderr, msg);
#endif
fflush(stderr);
}
void print_oom_message(void)
{
print_err_msg("Error: could not allocate enough memory.\n");
}
#ifdef _FOR_PYTHON
#define PY_MSG_MAX_LENGTH 256
void py_printf(const char *fmt, ...)
{
char msg[PY_MSG_MAX_LENGTH];
va_list args;
va_start(args, fmt);
vsnprintf(msg, PY_MSG_MAX_LENGTH, fmt, args);
va_end(args);
cy_printf(msg);
}
void py_errprintf(void *ignored, const char *fmt, ...)
{
char msg[PY_MSG_MAX_LENGTH];
va_list args;
va_start(args, fmt);
vsnprintf(msg, PY_MSG_MAX_LENGTH, fmt, args);
va_end(args);
cy_errprintf(msg);
}
void python_printmsg(char *msg)
{
PySys_WriteStdout("%s", msg);
}
void python_printerrmsg(char *msg)
{
PySys_WriteStderr("%s", msg);
}
#endif
void act_on_interrupt(int retval, bool handle_interrupt, bool print_msg)
{
if (retval == 3)
{
if (print_msg)
print_err_msg(" Error: procedure was interrupted.\n");
if (!handle_interrupt)
raise(SIGINT);
}
}
#ifdef _FOR_R
void R_nan_to_C_nan(real_t arr[], size_t n)
{
for (size_t ix = 0; ix < n; ix++)
arr[ix] = ISNAN(arr[ix])? NAN : arr[ix];
}
#endif
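/* Kahan (compensated) summation: 'err' carries the low-order bits that were
   lost when 'diff' was folded into the running sum, and is subtracted from the
   next element, so the accumulated rounding error stays O(1) instead of O(n). */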
long double compensated_sum(real_t *arr, size_t n)
{
long double err = 0.;
long double diff = 0.;
long double temp;
long double res = 0;
for (size_t ix = 0; ix < n; ix++)
{
diff = arr[ix] - err;
temp = res + diff;
err = (temp - res) - diff;
res = temp;
}
return res;
}
long double compensated_sum_product(real_t *restrict arr1, real_t *restrict arr2, size_t n)
{
long double err = 0.;
long double diff = 0.;
long double temp;
long double res = 0;
for (size_t ix = 0; ix < n; ix++)
{
diff = fmal(arr1[ix], arr2[ix], -err);
temp = res + diff;
err = (temp - res) - diff;
res = temp;
}
return res;
}
#ifdef AVOID_BLAS_SYR
/* https://github.com/xianyi/OpenBLAS/issues/3237 */
void custom_syr(const int_t n, const real_t alpha, const real_t *restrict x, real_t *restrict A, const int_t lda)
{
real_t temp;
real_t *restrict Arow;
for (int i = 0; i < n; i++) {
temp = alpha*x[i];
Arow = A + (size_t)i*(size_t)lda;
for (int j = i; j < n; j++)
Arow[j] = fma_t(temp, x[j], Arow[j]);
}
}
#endif
void set_blas_threads(int nthreads_set, int *nthreads_curr)
{
#ifdef _FOR_R
/* https://gist.github.com/KRD1/2503984 */
if (!has_RhpcBLASctl || ptr_glob_lst == NULL || ptr_nthreads == NULL)
return;
int errinfo = 0;
if (nthreads_curr != NULL) {
SEXP nthreads_curr_R = R_tryEvalSilent(VECTOR_ELT(*ptr_glob_lst, 5),
VECTOR_ELT(*ptr_glob_lst, 0),
&errinfo);
if (!errinfo) {
*nthreads_curr = Rf_asInteger(nthreads_curr_R);
}
*nthreads_curr = max2(*nthreads_curr, 1);
}
*ptr_nthreads = nthreads_set;
errinfo = 0;
R_tryEvalSilent(VECTOR_ELT(*ptr_glob_lst, 4),
VECTOR_ELT(*ptr_glob_lst, 0),
&errinfo);
#elif defined(_FOR_PYTHON) && !defined(IS_PY_TEST)
if (nthreads_curr != NULL) {
*nthreads_curr = py_get_threads();
}
py_set_threads(nthreads_set);
#if defined(HAS_OPENBLAS)
openblas_set_num_threads(nthreads_set);
#endif
#elif defined(HAS_OPENBLAS)
if (nthreads_curr != NULL) {
*nthreads_curr = openblas_get_num_threads();
*nthreads_curr = max2(*nthreads_curr, 1);
}
openblas_set_num_threads(nthreads_set);
#elif defined(_OPENMP) && !defined(MKL_H) && !defined(HAS_MKL)
if (nthreads_curr != NULL) {
*nthreads_curr = omp_get_num_threads();
*nthreads_curr = max2(*nthreads_curr, 1);
}
omp_set_num_threads(nthreads_set);
#endif
}
#if defined(_FOR_R) && defined(WRAPPED_GELSD) && !defined(USE_FLOAT)
SEXP wrapper_GELSD(void *data)
{
Args_to_GELSD *data_ = (Args_to_GELSD*)data;
tgelsd_(data_->m, data_->n, data_->nrhs,
data_->A, data_->lda, data_->B, data_->ldb,
data_->S, data_->rcond, data_->rank,
data_->work, data_->lwork, data_->iwork,
data_->info);
return R_NilValue;
}
void clean_after_GELSD(void *cdata, Rboolean jump)
{
if (jump)
{
PointersToFree *cdata_ = (PointersToFree*)cdata;
for (size_t ix = 0; ix < cdata_->n_pointers; ix++)
free(cdata_->pointers[ix]);
GELSD_free_inputs = false;
}
}
#endif
bool get_has_openmp(void)
{
#ifdef _OPENMP
return true;
#else
return false;
#endif
}
|
region_layer.c | #include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "dark_cuda.h"
#include "utils.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>   /* for time() used to seed srand() below */
#define DOABS 1
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
{
region_layer l = { (LAYER_TYPE)0 };
l.type = REGION;
l.n = n;
l.batch = batch;
l.h = h;
l.w = w;
l.classes = classes;
l.coords = coords;
l.cost = (float*)calloc(1, sizeof(float));
l.biases = (float*)calloc(n * 2, sizeof(float));
l.bias_updates = (float*)calloc(n * 2, sizeof(float));
l.outputs = h*w*n*(classes + coords + 1);
l.inputs = l.outputs;
l.max_boxes = max_boxes;
l.truths = max_boxes*(5);
l.delta = (float*)calloc(batch * l.outputs, sizeof(float));
l.output = (float*)calloc(batch * l.outputs, sizeof(float));
int i;
for(i = 0; i < n*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_region_layer;
l.backward = backward_region_layer;
#ifdef GPU
l.forward_gpu = forward_region_layer_gpu;
l.backward_gpu = backward_region_layer_gpu;
l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif
fprintf(stderr, "detection\n");
srand(time(0));
return l;
}
void resize_region_layer(layer *l, int w, int h)
{
#ifdef GPU
int old_w = l->w;
int old_h = l->h;
#endif
l->w = w;
l->h = h;
l->outputs = h*w*l->n*(l->classes + l->coords + 1);
l->inputs = l->outputs;
l->output = (float*)realloc(l->output, l->batch * l->outputs * sizeof(float));
l->delta = (float*)realloc(l->delta, l->batch * l->outputs * sizeof(float));
#ifdef GPU
if (old_w < w || old_h < h) {
cuda_free(l->delta_gpu);
cuda_free(l->output_gpu);
l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
}
#endif
}
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
box b;
b.x = (i + logistic_activate(x[index + 0])) / w;
b.y = (j + logistic_activate(x[index + 1])) / h;
b.w = exp(x[index + 2]) * biases[2*n];
b.h = exp(x[index + 3]) * biases[2*n+1];
if(DOABS){
b.w = exp(x[index + 2]) * biases[2*n] / w;
b.h = exp(x[index + 3]) * biases[2*n+1] / h;
}
return b;
}
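/* Decoding example: the network emits (tx, ty, tw, th) per anchor; the box
   center is the cell offset plus a sigmoid, and the size is the anchor prior
   scaled by exp(). E.g. for cell (i=3, j=2) on a 13x13 grid with tx=ty=0,
   sigmoid(0)=0.5 gives x=(3+0.5)/13 ~= 0.269 and y=(2+0.5)/13 ~= 0.192, and
   with tw=0 the width is just the (normalized, when DOABS) anchor width. */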
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
box pred = get_region_box(x, biases, n, index, i, j, w, h);
float iou = box_iou(pred, truth);
float tx = (truth.x*w - i);
float ty = (truth.y*h - j);
float tw = log(truth.w / biases[2*n]);
float th = log(truth.h / biases[2*n + 1]);
if(DOABS){
tw = log(truth.w*w / biases[2*n]);
th = log(truth.h*h / biases[2*n + 1]);
}
delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
delta[index + 2] = scale * (tw - x[index + 2]);
delta[index + 3] = scale * (th - x[index + 3]);
return iou;
}
void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
{
int i, n;
if(hier){
float pred = 1;
while(class_id >= 0){
pred *= output[index + class_id];
int g = hier->group[class_id];
int offset = hier->group_offset[g];
for(i = 0; i < hier->group_size[g]; ++i){
delta[index + offset + i] = scale * (0 - output[index + offset + i]);
}
delta[index + class_id] = scale * (1 - output[index + class_id]);
class_id = hier->parent[class_id];
}
*avg_cat += pred;
} else {
// Focal loss
if (focal_loss) {
// Focal Loss
float alpha = 0.5; // 0.25 or 0.5
//float gamma = 2; // hardcoded in many places of the grad-formula
int ti = index + class_id;
float pt = output[ti] + 0.000000000000001F;
// http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832
//float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // https://github.com/unsky/focal-loss
for (n = 0; n < classes; ++n) {
delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
delta[index + n] *= alpha*grad;
if (n == class_id) *avg_cat += output[index + n];
}
}
else {
// default
for (n = 0; n < classes; ++n) {
delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
if (n == class_id) *avg_cat += output[index + n];
}
}
}
}
float logit(float x)
{
return log(x/(1.-x));
}
float tisnan(float x)
{
return (x != x);
}
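/* Index into the flattened output tensor: entries are laid out per batch as
   [anchor n][entry (x,y,w,h,obj,classes...)][spatial h*w], so 'location'
   encodes anchor*w*h + spatial position and 'entry' selects the field. */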
static int entry_index(layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
int loc = location % (l.w*l.h);
return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc;
}
void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);
void forward_region_layer(const region_layer l, network_state state)
{
int i,j,b,t,n;
int size = l.coords + l.classes + 1;
memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
#endif
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
l.output[index + 4] = logistic_activate(l.output[index + 4]);
}
}
#ifndef GPU
if (l.softmax_tree){
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
}
}
} else if (l.softmax){
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
}
}
}
#endif
if(!state.train) return;
memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
float avg_iou = 0;
float recall = 0;
float avg_cat = 0;
float avg_obj = 0;
float avg_anyobj = 0;
int count = 0;
int class_count = 0;
*(l.cost) = 0;
for (b = 0; b < l.batch; ++b) {
if(l.softmax_tree){
int onlyclass_id = 0;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*5 + b*l.truths);
if(!truth.x) break; // continue;
int class_id = state.truth[t*5 + b*l.truths + 4];
float maxp = 0;
int maxi = 0;
if(truth.x > 100000 && truth.y > 100000){
for(n = 0; n < l.n*l.w*l.h; ++n){
int index = size*n + b*l.outputs + 5;
float scale = l.output[index-1];
float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
if(p > maxp){
maxp = p;
maxi = n;
}
}
int index = size*maxi + b*l.outputs + 5;
delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++class_count;
onlyclass_id = 1;
break;
}
}
if(onlyclass_id) continue;
}
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
float best_iou = 0;
int best_class_id = -1;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*5 + b*l.truths);
int class_id = state.truth[t * 5 + b*l.truths + 4];
if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file
if(!truth.x) break; // continue;
float iou = box_iou(pred, truth);
if (iou > best_iou) {
best_class_id = state.truth[t*5 + b*l.truths + 4];
best_iou = iou;
}
}
avg_anyobj += l.output[index + 4];
l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
else{
if (best_iou > l.thresh) {
l.delta[index + 4] = 0;
if(l.classfix > 0){
delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
++class_count;
}
}
}
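/* Warm-up (first 12800 images seen): pull every predicted box toward an
   anchor-sized box centered in its own cell, with a small 0.01 scale, so the
   x/y/w/h outputs start near the priors before real targets dominate. */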
if(*(state.net.seen) < 12800){
box truth = {0};
truth.x = (i + .5)/l.w;
truth.y = (j + .5)/l.h;
truth.w = l.biases[2*n];
truth.h = l.biases[2*n+1];
if(DOABS){
truth.w = l.biases[2*n]/l.w;
truth.h = l.biases[2*n+1]/l.h;
}
delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
}
}
}
}
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*5 + b*l.truths);
int class_id = state.truth[t * 5 + b*l.truths + 4];
if (class_id >= l.classes) {
printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1);
getchar();
continue; // if label contains class_id more than number of classes in the cfg-file
}
if(!truth.x) break; // continue;
float best_iou = 0;
int best_index = 0;
int best_n = 0;
i = (truth.x * l.w);
j = (truth.y * l.h);
//printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
box truth_shift = truth;
truth_shift.x = 0;
truth_shift.y = 0;
//printf("index %d %d\n",i, j);
for(n = 0; n < l.n; ++n){
int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
if(l.bias_match){
pred.w = l.biases[2*n];
pred.h = l.biases[2*n+1];
if(DOABS){
pred.w = l.biases[2*n]/l.w;
pred.h = l.biases[2*n+1]/l.h;
}
}
//printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
pred.x = 0;
pred.y = 0;
float iou = box_iou(pred, truth_shift);
if (iou > best_iou){
best_index = index;
best_iou = iou;
best_n = n;
}
}
//printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);
float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
if(iou > .5) recall += 1;
avg_iou += iou;
//l.delta[best_index + 4] = iou - l.output[best_index + 4];
avg_obj += l.output[best_index + 4];
l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
if (l.rescore) {
l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
}
if (l.map) class_id = l.map[class_id];
delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++count;
++class_count;
}
}
//printf("\n");
#ifndef GPU
flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}
void backward_region_layer(const region_layer l, network_state state)
{
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
int i;
float *const predictions = l.output;
#pragma omp parallel for
for (i = 0; i < l.w*l.h; ++i){
int j, n;
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int index = i*l.n + n;
int p_index = index * (l.classes + 5) + 4;
float scale = predictions[p_index];
if(l.classfix == -1 && scale < .5) scale = 0;
int box_index = index * (l.classes + 5);
boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
boxes[index].x *= w;
boxes[index].y *= h;
boxes[index].w *= w;
boxes[index].h *= h;
int class_index = index * (l.classes + 5) + 5;
if(l.softmax_tree){
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
int found = 0;
if(map){
for(j = 0; j < 200; ++j){
float prob = scale*predictions[class_index+map[j]];
probs[index][j] = (prob > thresh) ? prob : 0;
}
} else {
for(j = l.classes - 1; j >= 0; --j){
if(!found && predictions[class_index + j] > .5){
found = 1;
} else {
predictions[class_index + j] = 0;
}
float prob = predictions[class_index+j];
probs[index][j] = (scale > thresh) ? prob : 0;
}
}
} else {
for(j = 0; j < l.classes; ++j){
float prob = scale*predictions[class_index+j];
probs[index][j] = (prob > thresh) ? prob : 0;
}
}
if(only_objectness){
probs[index][0] = scale;
}
}
}
}
#ifdef GPU
void forward_region_layer_gpu(const region_layer l, network_state state)
{
/*
if(!state.train){
copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
return;
}
*/
flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
if(l.softmax_tree){
int i;
int count = 5;
for (i = 0; i < l.softmax_tree->groups; ++i) {
int group_size = l.softmax_tree->group_size[i];
softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
count += group_size;
}
}else if (l.softmax){
softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
}
float* in_cpu = (float*)calloc(l.batch * l.inputs, sizeof(float));
float *truth_cpu = 0;
if(state.truth){
int num_truth = l.batch*l.truths;
truth_cpu = (float*)calloc(num_truth, sizeof(float));
cuda_pull_array(state.truth, truth_cpu, num_truth);
}
cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
//cudaStreamSynchronize(get_cuda_stream());
network_state cpu_state = state;
cpu_state.train = state.train;
cpu_state.truth = truth_cpu;
cpu_state.input = in_cpu;
forward_region_layer(l, cpu_state);
//cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
free(cpu_state.input);
if(!state.train) return;
cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
//cudaStreamSynchronize(get_cuda_stream());
if(cpu_state.truth) free(cpu_state.truth);
}
void backward_region_layer_gpu(region_layer l, network_state state)
{
flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta);
}
#endif
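/* Maps boxes from letterboxed network coordinates back to the original image:
   new_w/new_h is the size the image occupied inside the netw x neth canvas
   (aspect ratio preserved), so the padding offset is removed and the box is
   rescaled; with relative == 0 the result is additionally scaled to pixels. */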
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
int i;
int new_w = 0;
int new_h = 0;
if (((float)netw / w) < ((float)neth / h)) {
new_w = netw;
new_h = (h * netw) / w;
}
else {
new_h = neth;
new_w = (w * neth) / h;
}
for (i = 0; i < n; ++i) {
box b = dets[i].bbox;
b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
b.w *= (float)netw / new_w;
b.h *= (float)neth / new_h;
if (!relative) {
b.x *= w;
b.w *= w;
b.y *= h;
b.h *= h;
}
dets[i].bbox = b;
}
}
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
int i, j, n, z;
float *predictions = l.output;
if (l.batch == 2) {
float *flip = l.output + l.outputs;
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w / 2; ++i) {
for (n = 0; n < l.n; ++n) {
for (z = 0; z < l.classes + l.coords + 1; ++z) {
int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
float swap = flip[i1];
flip[i1] = flip[i2];
flip[i2] = swap;
if (z == 0) {
flip[i1] = -flip[i1];
flip[i2] = -flip[i2];
}
}
}
}
}
for (i = 0; i < l.outputs; ++i) {
l.output[i] = (l.output[i] + flip[i]) / 2.;
}
}
for (i = 0; i < l.w*l.h; ++i) {
int row = i / l.w;
int col = i % l.w;
for (n = 0; n < l.n; ++n) {
int index = n*l.w*l.h + i;
for (j = 0; j < l.classes; ++j) {
dets[index].prob[j] = 0;
}
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float scale = l.background ? 1 : predictions[obj_index];
dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
dets[index].objectness = scale > thresh ? scale : 0;
if (dets[index].mask) {
for (j = 0; j < l.coords - 4; ++j) {
dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
}
}
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
if (l.softmax_tree) {
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
if (map) {
for (j = 0; j < 200; ++j) {
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
else {
int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
dets[index].prob[j] = (scale > thresh) ? scale : 0;
}
}
else {
if (dets[index].objectness) {
for (j = 0; j < l.classes; ++j) {
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
}
}
}
correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
void zero_objectness(layer l)
{
int i, n;
for (i = 0; i < l.w*l.h; ++i) {
for (n = 0; n < l.n; ++n) {
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
l.output[obj_index] = 0;
}
}
}
|
 parallelDeterminant2.c | // C program to find the determinant of a matrix
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
// print matrix
void printNxNMatrix(double** matrix, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
// printf("%f, ",matrix[i][j]);
}
// printf("\n");
}
// printf("\n");
}
// Function to get determinant of matrix
int determinantOfMatrix(double** matrix, int n, int p)
{
int index = 0;
double num1,num2,det = 1,total = 1; // Initialize result
// temporary array for storing row
double* temp = (double*)malloc((n+1)*sizeof(double));
//loop for traversing the diagonal elements
for(int i = 0; i < n; i++)
{
index = i; // initialize the index
        // advance until a row with a non-zero entry in column i is found
        // (check the bound first to avoid reading past the last row)
        while(index < n && matrix[index][i] == (double)0) {
            index++;
        }
        if(index == n){
            // the whole column below the diagonal is zero: the determinant is zero
            continue;
        }
// printf("index = %d when i = %d.\n",index,i);
if(index != i){
//loop for swaping the diagonal element row and index row
#pragma omp parallel for schedule(dynamic) num_threads(p)
for(int j = 0; j < n; j++){
// swap(mat[index][j],mat[i][j]);
double temp0 = matrix[index][j];
matrix[index][j] = matrix[i][j];
matrix[i][j] = temp0;
}
//determinant sign changes when we shift rows
//go through determinant properties
det = det*pow(-1,index-i);
}
// printf("det = %f",det);
#pragma omp parallel for num_threads(p)
for(int j = 0; j < n; j++) //storing the values of diagonal row elements
temp[j] = matrix[i][j];
// printf("matrix before traversing:\n");
// printfNxNMatrix(matrix, n);
// #pragma omp parallel for
{
for(int j = i+1; j < n; j++)//traversing every row below the diagonal element
{
num1 = temp[i]; //value of diagonal element
num2 = matrix[j][i]; //value of next row element
//traversing every column of row
// and multiplying to every row
#pragma omp parallel for num_threads(p)
for(int k = 0; k < n; k++)
{
//multiplying to make the diagonal
// element and next row element equal
matrix[j][k] = (num1 * matrix[j][k]) - (num2 * temp[k]);
// printf("matrix[j=%d][k=%d] = (num1=%f * matrix[j][k]) - (num2=%f * temp[k]=%f) = %f;\n",j,k,num1,num2,temp[k],matrix[j][k]);
// // printfNxNMatrix(matrix, n);
}
}
}
// printf("matrix after traversing and get total = %f:\n",total);
// printfNxNMatrix(matrix, n);
}
    total=1;
    //multiply powers of the pivots to get the accumulated scaling factor 'total'
#pragma omp parallel for reduction(*:total)
for(int i = 0; i < n-1; i++){
int power = n-i-1;
total *= pow(matrix[i][i],power);
}
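    // Why divide by 'total' at the end: each elimination step rewrites
    // row_j = pivot*row_j - num2*row_i, so pivot matrix[i][i] gets multiplied
    // into each of the (n-i-1) rows below it (adding a multiple of another row
    // does not change the determinant). 'total' accumulates exactly those
    // factors, so (product of the final diagonal) / total is the determinant.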
    // printf("now total = %f:\n",total);
    //multiplying the diagonal elements to get the (scaled) determinant
    #pragma omp parallel for reduction(*:det)
    for(int i = 0; i < n; i++)
        det = det * matrix[i][i];
    free(temp);
    return (det/total); // dividing by 'total' undoes the row scaling applied during elimination
}
// Driver code
int main() {
int i, j, n;
FILE *fp;
// read in file1
fp = fopen("largerNxN1.txt", "r");
fscanf(fp, "%i", &(n));
// printf("n is %i \n", n);
double** matrix = (double**)malloc(n*sizeof(double*));
for (i = 0; i<n; i++){
matrix[i] = (double*)malloc((n)*sizeof(double));
}
for (i = 0; i < n; ++i) {
for (j = 0;j < n; ++j) {
fscanf(fp, "%lf", &matrix[i][j]);
}
}
fclose(fp);
printNxNMatrix(matrix, n);
// printf("Finding Determinants...\n");
double t = omp_get_wtime();
determinantOfMatrix(matrix, n, 6);
printf("excution time is %0.5lf seconds\n", (omp_get_wtime()-t));
// printf("Input matrix:\n");
// printfNxNMatrix(matrix, n);
// printf("finding determinant....\n");
printf("Determinant of the matrix is : %d\n", determinantOfMatrix(matrix, n, 4));
// /*int mat[N][N] = {{6, 1, 1},
// {4, -2, 5},
// {2, 8, 7}}; */
// int mat[N][N] = {{1, 0, 2, -1},
// {3, 0, 0, 5},
// {2, 1, 4, -3},
// {1, 0, 5, 0}
// }; -> det = 30
return 0;
}
|
1.race10.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
int A[N][N];
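  // Loop-carried dependence: iteration (i, j) reads A[i-1][j-1], which is
  // written by iteration (i-1, j-1). With the outer iterations split across
  // threads by schedule(guided, 4), the read can race with that write.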
#pragma omp parallel for schedule(guided, 4)
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i - 1][j - 1];
}
// CHECK: Data Race detected
// END
|
matrixio.c | /// \file
/// Matrix I/O.
#include "matrixio.h"
#include <stdio.h>
#include <math.h>
#include "sparseMatrix.h"
#include "constants.h"
/// \details
/// Write out sparsity from sparse matrix.
void writeSparsePattern(char* fname, struct SparseMatrixSt* spmatrix, real_t hthresh)
{
FILE* sFile;
sFile = fopen(fname, "w");
// Rows must be written to the file in order, so keep this loop serial and
// give each iteration its own row buffer.
for (int i = 0; i < spmatrix->hsize; i++)
{
char hrow[spmatrix->hsize];
for (int j = 0; j < spmatrix->hsize; j++)
{
hrow[j] = '.';
}
for (int j = 0; j < spmatrix->iia[i]; j++)
{
if (ABS(spmatrix->val[i][j]) > hthresh)
{
hrow[spmatrix->jja[i][j]] = '*';
}
}
for (int j = 0; j < spmatrix->hsize;j++)
{
fprintf(sFile, "%c", hrow[j]);
}
fprintf(sFile, "\n");
}
fclose(sFile);
}
/// \details
/// Read in a Hamiltonian matrix from a file in Matrix Market format.
void readMTX(char* fname, struct SparseMatrixSt* hmatrix)
{
int hvalue, msum, irow, icol, ind;
char line[100], header1[20], header2[20], header3[20], header4[20], header5[20];
double value;
FILE* hFile;
hFile = fopen(fname, "r");
// Read in header
fscanf(hFile, "%s %s %s %s %s", header1, header2, header3, header4, header5);
// Read in dimensions of matrix as dense and the number of sparse elements
fscanf(hFile, "%d %d %d", &hvalue, &hvalue, &msum);
// Read in elements for sparse matrix
// Read in as 1-based
for (int i = 0; i < msum; i++)
{
fscanf(hFile, "%d %d %lg", &irow, &icol, &value);
irow--; icol--;
ind = hmatrix->iia[irow];
hmatrix->jja[irow][ind] = icol;
hmatrix->val[irow][ind] = value;
hmatrix->iia[irow]++;
}
fclose(hFile);
}
/// \details
/// Write out sparse matrix in Matrix market format.
void writeMTX(char* fname, struct SparseMatrixSt* spmatrix)
{
FILE* mFile;
int msum;
mFile = fopen(fname, "w");
// Write header
fprintf(mFile, "%%%%MatrixMarket matrix coordinate real general\n"); // "%%" prints a literal '%'
// Collect number of non-zero elements
// Write out matrix size as dense and number of non-zero elements
msum = 0;
for (int i = 0; i < spmatrix->hsize; i++)
{
msum += spmatrix->iia[i];
}
fprintf(mFile, "%d %d %d\n", spmatrix->hsize, spmatrix->hsize, msum);
// Write out non-zero elements
for (int i = 0; i < spmatrix->hsize; i++)
{
for (int j = 0; j < spmatrix->iia[i]; j++)
{
fprintf(mFile, "%d %d %lg\n", i+1, spmatrix->jja[i][j]+1, spmatrix->val[i][j]);
}
}
fclose(mFile);
}
|
GConvolver.h | #pragma once
//#define GUITARD_CONV_THREAD_POOL
//#define GUITARD_CONV_OPENMP
#ifdef GUITARD_CONV_OPENMP
#include <omp.h>
#endif
#include "../GConfig.h"
#include "./GTypes.h"
/**
* Decide which type the convolution will use
*/
#ifndef GUITARD_FLOAT_CONVOLUTION
#define FFTCONVOLVER_TYPE guitard::sample
#else
/**
* If it's float the convolver can do sse
*/
#define FFTCONVOLVER_TYPE float
#ifdef GUITARD_SSE
#define FFTCONVOLVER_USE_SSE
#endif
#endif
/**
* Figure out whether there needs to be a conversion
*/
#if defined(GUITARD_FLOAT_CONVOLUTION) && defined(SAMPLE_TYPE_FLOAT) || !defined(GUITARD_FLOAT_CONVOLUTION) && !defined(SAMPLE_TYPE_FLOAT)
#define GUITARD_CONV_SAME_TYPE
#endif
#include "../../thirdparty/convolver/twoStageConvolver.h"
#ifdef GUITARD_CONV_THREAD_POOL
#include "../../thirdparty/threadpool.h"
#endif
namespace guitard {
/**
* Wraps up the FFTConvolver to do easy stereo convolution
* and also deal with the buffers
*/
class WrappedConvolver {
const int CONV_BLOCK_SIZE = 128;
const int CONV_TAIL_BLOCK_SIZE = 1024 * 4;
static const int CHANNEL_COUNT = 2;
int mMaxBuffer = 0;
#ifdef GUITARD_CONV_THREAD_POOL
ctpl::thread_pool mPool;
#endif
/** We'll only do stereo convolution at most */
fftconvolver::TwoStageFFTConvolver mConvolvers[CHANNEL_COUNT];
#ifndef GUITARD_CONV_SAME_TYPE
/** Buffers need to be converted from double to float */
FFTCONVOLVER_TYPE mConversionBufferIn[CHANNEL_COUNT][GUITARD_MAX_BUFFER];
FFTCONVOLVER_TYPE mConversionBufferOut[CHANNEL_COUNT][GUITARD_MAX_BUFFER];
#endif
bool mIRLoaded = false;
const int maxBuffer;
bool mIsProcessing = false;
public:
bool mStereo = false;
GUITARD_NO_COPY(WrappedConvolver)
explicit WrappedConvolver(const int maxBuffer = 512): maxBuffer(maxBuffer) {
#ifdef GUITARD_CONV_THREAD_POOL
mPool.resize(1);
#endif
mMaxBuffer = maxBuffer;
}
void loadIR(float** samples, const size_t sampleCount, const size_t channelCount) {
if (samples == nullptr || sampleCount == 0 || channelCount == 0) { return; }
mIRLoaded = false;
while (mIsProcessing) {}
for (int c = 0; c < channelCount; c++) {
if (channelCount == 1) {
for (int ch = 0; ch < CHANNEL_COUNT; ch++) {
mConvolvers[ch].init(CONV_BLOCK_SIZE, CONV_TAIL_BLOCK_SIZE, samples[0], sampleCount);
}
}
else if (channelCount == CHANNEL_COUNT) {
mConvolvers[c].init(CONV_BLOCK_SIZE, CONV_TAIL_BLOCK_SIZE, samples[c], sampleCount);
}
}
mIRLoaded = true;
}
void ProcessBlock(sample** in, sample** out, const int nFrames) {
if (!mIRLoaded) { // just pass the signal through
for (int c = 0; c < CHANNEL_COUNT; c++) {
for (int i = 0; i < nFrames; i++) {
out[c][i] = in[c][i];
}
}
return;
}
mIsProcessing = true;
#ifdef GUITARD_CONV_THREAD_POOL
/** THREADPOOL TEST */
std::future<void> right;
if (mStereo) { // do the conv on the second channel
right = mPool.push([&](int id) {
processChannel(in, out, nFrames, 1);
});
}
processChannel(in, out, nFrames, 0);
if (mStereo) {
right.wait(); // wait for the second channel result
}
else {
::memcpy(out[1], out[0], nFrames * sizeof(sample));
}
#else
#ifdef GUITARD_CONV_OPENMP
/** OPENMP TEST */
if (mStereo) {
#pragma omp parallel num_threads(2)
#pragma omp for
for (int n = 0; n < 2; ++n) {
processChannel(in, out, nFrames, n);
}
}
else {
processChannel(in, out, nFrames, 0);
::memcpy(out[1], out[0], nFrames * sizeof(sample));
}
#else
/** REFERENCE */
processChannel(in, out, nFrames, 0);
if (!mStereo) { // mono needs the other channel filled too
::memcpy(out[1], out[0], nFrames * sizeof(sample));
}
else { // else do the second channel as well
processChannel(in, out, nFrames, 1);
}
#endif
#endif
mIsProcessing = false;
}
static String getLicense() {
String l = "Realtime Convolution by\n";
l += "https://github.com/HiFi-LoFi\n";
l += "https://github.com/HiFi-LoFi/FFTConvolver\n";
l += "MIT License\n\n";
l += "Wave file reader \"dr_wav\"";
l += "https://github.com/mackron\n";
l += "https://github.com/mackron/dr_libs/blob/master/dr_wav.h\n";
l += "Public domain";
return l;
}
private:
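/** Convolve a single channel. When the sample type differs from
* FFTCONVOLVER_TYPE the signal is converted on the way in and out. */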
inline void processChannel(sample** in, sample** out, const int nFrames, int channel) {
#ifdef GUITARD_CONV_SAME_TYPE
mConvolvers[channel].process(in[channel], out[channel], nFrames); // no conversion needed
#else
for (int i = 0; i < nFrames; i++) {
mConversionBufferIn[channel][i] = static_cast<float>(in[channel][i]);
}
mConvolvers[channel].process(mConversionBufferIn[channel], mConversionBufferOut[channel], nFrames);
for (int i = 0; i < nFrames; i++) {
out[channel][i] = mConversionBufferOut[channel][i];
}
#endif
}
};
} |
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#if QMC_BUILD_LEVEL<5
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#include <qmc_common.h>
#endif
#include "Particle/DistanceTableData.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
* - loops over the groups: eliminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
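* - pair functors are stored flat as F[ig*NumGroups+jg]; for a two-group
*   (up/down) electron set addFunc fills F = {uu, ud, du, dd} with ud == du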
*/
template<class FT>
struct J2OrbitalSoA : public WaveFunctionComponent
{
///alias FuncType
using FuncType=FT;
///type of each component U, dU, d2U;
using valT=typename FT::real_type;
///element position type
using posT=TinyVector<valT,OHMMS_DIM>;
///use the same container
using RowContainer=DistanceTableData::RowContainer;
///number of particles
size_t N;
///number of particles + padded
size_t N_padded;
///number of groups of the target particleset
size_t NumGroups;
///task id
int TaskID;
///Used to compute correction
bool FirstTime;
///diff value
RealType DiffVal;
///Correction
RealType KEcorr;
///\f$Uat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Uat;
///\f$dUat[i] = sum_(j) du_{i,j}\f$
using gContainer_type=VectorSoaContainer<valT,OHMMS_DIM>;
gContainer_type dUat;
///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
Vector<valT> d2Uat;
valT cur_Uat;
aligned_vector<valT> cur_u, cur_du, cur_d2u;
aligned_vector<valT> old_u, old_du, old_d2u;
aligned_vector<valT> DistCompressed;
aligned_vector<int> DistIndice;
///Container for \f$F[ig*NumGroups+jg]\f$
std::vector<FT*> F;
///Unique J2 set for cleanup
std::map<std::string,FT*> J2Unique;
J2OrbitalSoA(ParticleSet& p, int tid);
J2OrbitalSoA(const J2OrbitalSoA& rhs)=delete;
~J2OrbitalSoA();
/* initialize storage */
void init(ParticleSet& p);
/** add functor for (ia,ib) pair */
void addFunc(int ia, int ib, FT* j);
void resetTargetParticleSet(ParticleSet& P)
{
if(dPsi)
dPsi->resetTargetParticleSet(P);
}
/** check in an optimizable parameter
* @param o a super set of optimizable variables
*/
void checkInVariables(opt_variables_type& active)
{
myVars.clear();
typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
while(it != it_end)
{
(*it).second->checkInVariables(active);
(*it).second->checkInVariables(myVars);
++it;
}
}
/** check out optimizable variables
*/
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
Optimizable=myVars.is_optimizable();
typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
while(it != it_end)
{
(*it).second->checkOutVariables(active);
++it;
}
if(dPsi)
dPsi->checkOutVariables(active);
}
///reset the value of all the unique Two-Body Jastrow functions
void resetParameters(const opt_variables_type& active)
{
if(!Optimizable)
return;
typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
while(it != it_end)
{
(*it).second->resetParameters(active);
++it;
}
if(dPsi)
dPsi->resetParameters( active );
for(int i=0; i<myVars.size(); ++i)
{
int ii=myVars.Index[i];
if(ii>=0)
myVars[i]= active[ii];
}
}
/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
typename std::map<std::string,FT*>::iterator it(J2Unique.begin()),it_end(J2Unique.end());
while(it != it_end)
{
(*it).second->myVars.print(os);
++it;
}
ChiesaKEcorrection();
}
RealType ChiesaKEcorrection() { return RealType();}
/**@} */
WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
RealType evaluateLog(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L);
/** recompute internal data assuming distance table is fully ready */
void recompute(ParticleSet& P);
ValueType ratio(ParticleSet& P, int iat);
void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
for(int k=0; k<ratios.size(); ++k)
ratios[k]=std::exp(Uat[VP.refPtcl] -
computeU(VP.refPS, VP.refPtcl, VP.DistTables[0]->Distances[k]));
}
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
GradType evalGrad(ParticleSet& P, int iat);
ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
void acceptMove(ParticleSet& P, int iat);
inline void restore(int iat) {}
/** compute G and L after the sweep
*/
void evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L, bool fromscratch=false);
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
if ( Bytes_in_WFBuffer == 0 )
{
Bytes_in_WFBuffer = buf.current();
buf.add(Uat.begin(), Uat.end());
buf.add(dUat.data(), dUat.end());
buf.add(d2Uat.begin(), d2Uat.end());
Bytes_in_WFBuffer = buf.current()-Bytes_in_WFBuffer;
// free local space
Uat.free();
dUat.free();
d2Uat.free();
}
else
{
buf.forward(Bytes_in_WFBuffer);
}
}
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
Uat.attachReference(buf.lendReference<valT>(N), N);
dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded*OHMMS_DIM));
d2Uat.attachReference(buf.lendReference<valT>(N), N);
}
RealType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch=false)
{
evaluateGL(P, P.G, P.L, false);
buf.forward(Bytes_in_WFBuffer);
return LogValue;
}
/*@{ internal compute engines*/
inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist)
{
valT curUat(0);
const int igt=P.GroupID[iat]*NumGroups;
for(int jg=0; jg<NumGroups; ++jg)
{
const FuncType& f2(*F[igt+jg]);
int iStart = P.first(jg);
int iEnd = P.last(jg);
curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data());
}
return curUat;
}
inline void computeU3(const ParticleSet& P, int iat, const RealType* restrict dist,
RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle=false);
/** compute gradient
*/
inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const
{
posT grad;
for(int idim=0; idim<OHMMS_DIM; ++idim)
{
const valT* restrict dX=displ.data(idim);
valT s=valT();
#pragma omp simd reduction(+:s) aligned(du,dX)
for(int jat=0; jat<N; ++jat) s+=du[jat]*dX[jat];
grad[idim]=s;
}
return grad;
}
/**@} */
};
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : TaskID(tid)
{
init(p);
FirstTime =true;
KEcorr=0.0;
OrbitalName = "J2OrbitalSoA";
}
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
auto it=J2Unique.begin();
while(it != J2Unique.end())
{
delete ((*it).second);
++it;
}
}//need to clean up J2Unique
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
N=p.getTotalNum();
N_padded=getAlignedSize<valT>(N);
NumGroups=p.groups();
Uat.resize(N);
dUat.resize(N);
d2Uat.resize(N);
cur_u.resize(N);
cur_du.resize(N);
cur_d2u.resize(N);
old_u.resize(N);
old_du.resize(N);
old_d2u.resize(N);
F.resize(NumGroups*NumGroups,nullptr);
DistCompressed.resize(N);
DistIndice.resize(N);
}
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
if(ia==ib)
{
if(ia==0)//first time, assign everything
{
int ij=0;
for(int ig=0; ig<NumGroups; ++ig)
for(int jg=0; jg<NumGroups; ++jg, ++ij)
if(F[ij]==nullptr) F[ij]=j;
}
else
F[ia*NumGroups+ib]=j;
}
else
{
if(N==2)
{
// a very special case, 1 up + 1 down
// uu/dd was prevented by the builder
for(int ig=0; ig<NumGroups; ++ig)
for(int jg=0; jg<NumGroups; ++jg)
F[ig*NumGroups+jg]=j;
}
else
{
// generic case
F[ia*NumGroups+ib]=j;
F[ib*NumGroups+ia]=j;
}
}
std::stringstream aname;
aname<<ia<<ib;
J2Unique[aname.str()]=j;
//ChiesaKEcorrection();
FirstTime = false;
}
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
J2OrbitalSoA<FT>* j2copy=new J2OrbitalSoA<FT>(tqp,-1);
if (dPsi)
j2copy->dPsi = dPsi->makeClone(tqp);
std::map<const FT*,FT*> fcmap;
for(int ig=0; ig<NumGroups; ++ig)
for(int jg=ig; jg<NumGroups; ++jg)
{
int ij=ig*NumGroups+jg;
if(F[ij]==0)
continue;
typename std::map<const FT*,FT*>::iterator fit=fcmap.find(F[ij]);
if(fit == fcmap.end())
{
FT* fc=new FT(*F[ij]);
j2copy->addFunc(ig,jg,fc);
//if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
fcmap[F[ij]]=fc;
}
}
j2copy->Optimizable = Optimizable;
return j2copy;
}
/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
* @param P particleset
* @param iat particle index
* @param dist starting distance
* @param u starting value
* @param du starting first deriv
* @param d2u starting second deriv
*/
template<typename FT>
inline void
J2OrbitalSoA<FT>::computeU3(const ParticleSet& P, int iat, const RealType* restrict dist,
RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle)
{
const int jelmax=triangle?iat:N;
constexpr valT czero(0);
std::fill_n(u, jelmax,czero);
std::fill_n(du, jelmax,czero);
std::fill_n(d2u,jelmax,czero);
const int igt=P.GroupID[iat]*NumGroups;
for(int jg=0; jg<NumGroups; ++jg)
{
const FuncType& f2(*F[igt+jg]);
int iStart = P.first(jg);
int iEnd = std::min(jelmax,P.last(jg));
f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data());
}
//u[iat]=czero;
//du[iat]=czero;
//d2u[iat]=czero;
}
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType
J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
//only ratio, ready to compute it again
UpdateMode=ORB_PBYP_RATIO;
cur_Uat=computeU(P, iat, P.DistTables[0]->Temp_r.data());
return std::exp(Uat[iat]-cur_Uat);
}
template<typename FT>
inline void
J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
const DistanceTableData* d_table=P.DistTables[0];
const auto dist=d_table->Temp_r.data();
for(int ig=0; ig<NumGroups; ++ig)
{
const int igt=ig*NumGroups;
valT sumU(0);
for(int jg=0; jg<NumGroups; ++jg)
{
const FuncType& f2(*F[igt+jg]);
int iStart = P.first(jg);
int iEnd = P.last(jg);
sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data());
}
for(int i=P.first(ig); i<P.last(ig); ++i)
{
// remove self-interaction
const valT Uself = F[igt+ig]->evaluate(dist[i]);
ratios[i]=std::exp(Uat[i]+Uself-sumU);
}
}
}
template<typename FT>
typename J2OrbitalSoA<FT>::GradType
J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
return GradType(dUat[iat]);
}
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType
J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
UpdateMode=ORB_PBYP_PARTIAL;
computeU3(P,iat,P.DistTables[0]->Temp_r.data(), cur_u.data(),cur_du.data(),cur_d2u.data());
cur_Uat=simd::accumulate_n(cur_u.data(),N,valT());
DiffVal=Uat[iat]-cur_Uat;
grad_iat+=accumulateG(cur_du.data(),P.DistTables[0]->Temp_dr);
return std::exp(DiffVal);
}
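/** accept the proposed move of particle iat
* folds the difference between the new (cur_*) and old (old_*) pair values
* into the per-particle accumulators Uat, dUat and d2Uat
*/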
template<typename FT>
void
J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat)
{
// get the old u, du, d2u
const DistanceTableData* d_table=P.DistTables[0];
computeU3(P,iat,d_table->Distances[iat],old_u.data(),old_du.data(),old_d2u.data());
if(UpdateMode == ORB_PBYP_RATIO)
{//ratio-only during the move; need to compute derivatives
const auto dist=d_table->Temp_r.data();
computeU3(P,iat,dist,cur_u.data(),cur_du.data(),cur_d2u.data());
}
valT cur_d2Uat(0);
const auto& new_dr=d_table->Temp_dr;
const auto& old_dr=d_table->Displacements[iat];
constexpr valT lapfac=OHMMS_DIM-RealType(1);
#pragma omp simd reduction(+:cur_d2Uat)
for(int jat=0; jat<N; jat++)
{
const valT du = cur_u[jat] - old_u[jat];
const valT newl = cur_d2u[jat] + lapfac*cur_du[jat];
const valT dl = old_d2u[jat] + lapfac*old_du[jat] - newl;
Uat[jat] += du;
d2Uat[jat] += dl;
cur_d2Uat -= newl;
}
posT cur_dUat;
for(int idim=0; idim<OHMMS_DIM; ++idim)
{
const valT* restrict new_dX=new_dr.data(idim);
const valT* restrict old_dX=old_dr.data(idim);
const valT* restrict cur_du_pt=cur_du.data();
const valT* restrict old_du_pt=old_du.data();
valT* restrict save_g=dUat.data(idim);
valT cur_g=cur_dUat[idim];
#pragma omp simd reduction(+:cur_g) aligned(old_dX,new_dX,save_g,cur_du_pt,old_du_pt)
for(int jat=0; jat<N; jat++)
{
const valT newg = cur_du_pt[jat] * new_dX[jat];
const valT dg = newg - old_du_pt[jat]*old_dX[jat];
save_g[jat] -= dg;
cur_g += newg;
}
cur_dUat[idim] = cur_g;
}
LogValue += Uat[iat]-cur_Uat;
Uat[iat] = cur_Uat;
dUat(iat) = cur_dUat;
d2Uat[iat] = cur_d2Uat;
}
template<typename FT>
void
J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
const DistanceTableData* d_table=P.DistTables[0];
for(int ig=0; ig<NumGroups; ++ig)
{
const int igt=ig*NumGroups;
for(int iat=P.first(ig),last=P.last(ig); iat<last; ++iat)
{
computeU3(P,iat,d_table->Distances[iat],cur_u.data(),cur_du.data(),cur_d2u.data(),true);
Uat[iat]=simd::accumulate_n(cur_u.data(),iat,valT());
posT grad;
valT lap(0);
const valT* restrict u = cur_u.data();
const valT* restrict du = cur_du.data();
const valT* restrict d2u = cur_d2u.data();
const RowContainer& displ = d_table->Displacements[iat];
constexpr valT lapfac=OHMMS_DIM-RealType(1);
#pragma omp simd reduction(+:lap) aligned(du,d2u)
for(int jat=0; jat<iat; ++jat)
lap+=d2u[jat]+lapfac*du[jat];
for(int idim=0; idim<OHMMS_DIM; ++idim)
{
const valT* restrict dX=displ.data(idim);
valT s=valT();
#pragma omp simd reduction(+:s) aligned(du,dX)
for(int jat=0; jat<iat; ++jat) s+=du[jat]*dX[jat];
grad[idim]=s;
}
dUat(iat)=grad;
d2Uat[iat]=-lap;
// add the contribution from the upper triangle
#pragma omp simd aligned(u,du,d2u)
for(int jat=0; jat<iat; jat++)
{
Uat[jat] += u[jat];
d2Uat[jat] -= d2u[jat]+lapfac*du[jat];
}
for(int idim=0; idim<OHMMS_DIM; ++idim)
{
valT* restrict save_g=dUat.data(idim);
const valT* restrict dX=displ.data(idim);
#pragma omp simd aligned(save_g,du,dX)
for(int jat=0; jat<iat; jat++)
save_g[jat]-=du[jat]*dX[jat];
}
}
}
}
template<typename FT>
typename J2OrbitalSoA<FT>::RealType
J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L)
{
evaluateGL(P,G,L,true);
return LogValue;
}
template<typename FT>
void
J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L, bool fromscratch)
{
if(fromscratch) recompute(P);
LogValue=valT(0);
for(int iat=0; iat<N; ++iat)
{
LogValue += Uat[iat];
G[iat] += dUat[iat];
L[iat] += d2Uat[iat];
}
constexpr valT mhalf(-0.5);
LogValue=mhalf*LogValue;
}
}
#endif
|
SpVec.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SPVEC_H_
#define SRC_SPVEC_H_
#include <string>
#include <algorithm>
#include "src/DenseSegment.h"
#include "src/CSSegment.h"
template <typename SpSegment>
class SpVec {
public:
SpSegment * segments;
int* start_id;
int* nodeIds;
int nsegments;
int n;
bool empty;
std::string name;
int num_tiles_x;
int (*pfn)(int, int, int);
SpVec() { empty = true; }
void alloc(int _n, int _nsegments, int* _nodeIds, int* _start_id) {
// Copy metadata
empty = false;
start_id =
reinterpret_cast<int*>(_mm_malloc((_nsegments + 1) * sizeof(int), 64));
nodeIds =
reinterpret_cast<int*>(_mm_malloc((_nsegments) * sizeof(int), 64));
memcpy(start_id, _start_id, (_nsegments + 1) * sizeof(int));
memcpy(nodeIds, _nodeIds, (_nsegments) * sizeof(int));
n = _n;
nsegments = _nsegments;
assert(nsegments > 0);
// Allocate space for tiles
segments = new SpSegment[nsegments];
for (int j = 0; j < nsegments; j++) {
segments[j] = SpSegment(start_id[j + 1] - start_id[j]);
}
}
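// Return the index of the segment whose range (start_id[i], start_id[i+1]]
// contains vertex id src (ids are treated as 1-based), or -1 if none does.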
inline int getPartition(int src) const {
for (int i = 0; i < nsegments; i++) {
if ((src > start_id[i]) && (src <= start_id[i + 1])) {
return i;
}
}
return -1;
}
template <typename T>
void ingestEdgelist(edgelist_t<T> blob) {
int nnz_l = blob.nnz;
edge_t<T>* edge_list = blob.edges;
int m = blob.m;
assert(blob.n == 1);
printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz);
// Done with partitioning
// Now, assign.
int* assignment = new int[nnz_l];
#pragma omp parallel for
for (int i = 0; i < nnz_l; i++) {
int tile = getPartition(edge_list[i].src);
assert(tile != -1);
assignment[i] = nodeIds[tile];
}
// assignment over
MPI_Barrier(MPI_COMM_WORLD);
// pack into messages
// calculate message sizes
int* count = new int[global_nrank];
int* recv_count = new int[global_nrank];
MPI_Request* mpi_req = new MPI_Request[2 * global_nrank];
MPI_Status* mpi_status = new MPI_Status[2 * global_nrank];
memset(count, 0, sizeof(int) * global_nrank);
for (int i = 0; i < nnz_l; i++) {
int r = assignment[i];
count[r]++;
}
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(&count[i], 1, MPI_INT, i, global_myrank, MPI_COMM_WORLD,
&mpi_req[i]);
}
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(&recv_count[i], 1, MPI_INT, i, i, MPI_COMM_WORLD,
&mpi_req[i + global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
MPI_Barrier(MPI_COMM_WORLD);
// pack the messages and send
edge_t<T>** msg = new edge_t<T>* [global_nrank];
int* offsets = new int[global_nrank];
for (int i = 0; i < global_nrank; i++) {
msg[i] = new edge_t<T>[count[i]];
offsets[i] = 0;
}
for (int i = 0; i < nnz_l; i++) {
int r = assignment[i];
msg[r][offsets[r]] = edge_list[i];
++offsets[r];
}
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(msg[i], (uint64_t)sizeof(edge_t<T>) * (uint64_t)count[i],
MPI_CHAR, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]);
}
// receive messages into final_edge_list
int new_nnz = 0;
int* local_hist = new int[global_nrank + 1];
local_hist[0] = 0;
for (int i = 0; i < global_nrank; i++) {
new_nnz += recv_count[i];
local_hist[i + 1] = local_hist[i] + recv_count[i];
}
edge_t<T>* final_edge_list = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)new_nnz * (uint64_t)sizeof(edge_t<T>), 64));
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(&final_edge_list[local_hist[i]],
(uint64_t)sizeof(edge_t<T>) * (uint64_t)recv_count[i], MPI_CHAR,
i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
for (int i = 0; i < global_nrank; i++) {
delete[] msg[i];
}
delete[] msg;
delete[] local_hist;
delete[] offsets;
delete[] count;
delete[] recv_count;
delete[] mpi_req;
delete[] mpi_status;
printf("Rank %d: After shuffle %d edges\n", global_myrank, new_nnz);
for (int i = 0; i < new_nnz; i++) {
int ival, jval;
int tile = getPartition(final_edge_list[i].src);
assert(tile != -1);
assert(nodeIds[tile] == global_myrank);
}
MPI_Barrier(MPI_COMM_WORLD);
// Sort these edges by segment ID
edge_t<T>* edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)new_nnz * (uint64_t)sizeof(edge_t<T>), 64));
int* partitions = reinterpret_cast<int*>(
_mm_malloc((uint64_t)new_nnz * (uint64_t)sizeof(int), 64));
uint64_t* counts = reinterpret_cast<uint64_t*>(
_mm_malloc((nsegments) * sizeof(uint64_t), 64));
uint64_t* start_nzs = reinterpret_cast<uint64_t*>(
_mm_malloc((nsegments + 1) * sizeof(uint64_t), 64));
memset(counts, 0, (nsegments) * sizeof(uint64_t));
memset(start_nzs, 0, (nsegments+1) * sizeof(uint64_t));
for (uint64_t i = 0; i < (uint64_t)new_nnz; i++) {
partitions[i] = getPartition(final_edge_list[i].src);
counts[partitions[i]]++;
}
uint64_t acc = 0;
for (int i = 0; i < nsegments; i++) {
start_nzs[i] = acc;
acc += counts[i];
}
start_nzs[nsegments] = acc;
memset(counts, 0, (nsegments) * sizeof(uint64_t));
for (uint64_t i = 0; i < (uint64_t)new_nnz; i++) {
int new_idx = start_nzs[partitions[i]] + counts[partitions[i]];
assert(new_idx < new_nnz);
assert(new_idx >= 0);
assert(partitions[i] < nsegments);
assert(partitions[i] >= 0);
edges[new_idx] = final_edge_list[i];
counts[partitions[i]]++;
}
if(new_nnz > 0)
{
_mm_free(final_edge_list);
_mm_free(partitions);
}
for (int segment_i = 0; segment_i < nsegments; segment_i++) {
if (nodeIds[segment_i] == global_myrank) {
/*
std::cout << "Node: " << global_myrank
<< " processing tile: " << segment_i << std::endl;
*/
int tile_m = start_id[segment_i + 1] - start_id[segment_i];
int nnz = counts[segment_i];
int start_nz = start_nzs[segment_i];
assert(start_nz <= new_nnz);
assert(nnz <= new_nnz);
if (nnz <= 0) {
segments[segment_i] = SpSegment(tile_m);
std::stringstream ss;
ss << "LoadedEmpty_" << segment_i;
segments[segment_i].name = ss.str();
} else {
segments[segment_i] =
SpSegment(edges + start_nz, tile_m, nnz, start_id[segment_i]);
std::stringstream ss;
ss << "Loaded_" << segment_i;
segments[segment_i].name = ss.str();
}
}
}
_mm_free(counts);
_mm_free(start_nzs);
_mm_free(edges);
MPI_Barrier(MPI_COMM_WORLD);
}
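// Split a length-n vector into num_tiles_x segments whose boundaries are
// rounded up to a multiple of 256; e.g. n = 1000 with 4 tiles gives segment
// starts {0, 256, 512, 768, 1000}.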
void AllocatePartitioned(int n, int _num_tiles_x,
int (*_pfn)(int, int, int)) {
num_tiles_x = _num_tiles_x;
pfn = _pfn;
int vx, vy;
int roundup = 256;
int nsegments = num_tiles_x;
vx =
((((n + nsegments - 1) / nsegments) + roundup - 1) / roundup) * roundup;
// In case the roundup affected the num tiles
int* nodeIds =
reinterpret_cast<int*>(_mm_malloc(num_tiles_x * sizeof(int), 64));
int* startx =
reinterpret_cast<int*>(_mm_malloc((num_tiles_x + 1) * sizeof(int), 64));
for (int j = 0; j < num_tiles_x; j++) {
nodeIds[j] = pfn(j, num_tiles_x, global_nrank);
}
for (int j = 0; j < num_tiles_x; j++) {
startx[j] = std::min(vx * j, n);
}
startx[num_tiles_x] = n;
alloc(n, num_tiles_x, nodeIds, startx);
_mm_free(nodeIds);
_mm_free(startx);
}
template<typename T>
void set(int idx, T val) {
assert(!empty);
int partitionId = getPartition(idx);
assert(partitionId >= 0);
if (nodeIds[partitionId] == global_myrank) {
assert(segments[partitionId].capacity > 0);
segments[partitionId].set(idx - start_id[partitionId], val);
}
}
template<typename T>
void setAll(T val) {
assert(!empty);
for(int segmentId = 0 ; segmentId < nsegments ; segmentId++)
{
if(nodeIds[segmentId] == global_myrank)
{
segments[segmentId].setAll(val);
}
}
}
template<typename T>
void get(const int idx, T * myres) const {
assert(!empty);
int partitionId = getPartition(idx);
assert(partitionId >= 0);
if (nodeIds[partitionId] == global_myrank) {
SpSegment segment = segments[partitionId];
*myres = segment.get(idx - start_id[partitionId]);
}
}
int getNNZ()
{
int total_nnz = 0;
for(int s = 0 ; s < nsegments ; s++)
{
if(nodeIds[s] == global_myrank)
{
//total_nnz += segments[s].getNNZ();
total_nnz += segments[s].compute_nnz();
}
}
// global reduction
MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
return total_nnz;
}
bool node_owner(const int idx) const {
assert(!empty);
int partitionId = getPartition(idx);
assert(partitionId >= 0);
bool v;
if (nodeIds[partitionId] == global_myrank) {
v = true;
} else {
v = false;
}
return v;
}
void printStatus() const {
if(global_myrank == 0)
{
std::cout << "nsegments: " << nsegments << std::endl;
}
MPI_Barrier(MPI_COMM_WORLD);
for(int segment = 0 ; segment < nsegments ; segment++)
{
if(nodeIds[segment] == global_myrank)
{
std::cout << "nodeID, segment, allocated, uninitialized: " << global_myrank << "\t" << segment << "\t" << segments[segment].properties.allocated << "\t" << segments[segment].properties.uninitialized << std::endl;
}
MPI_Barrier(MPI_COMM_WORLD);
}
}
};
#endif // SRC_SPVEC_H_
|
client_utils.h | // Copyright (c) 2020 - present Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef CLIENT_UTILS_H
#define CLIENT_UTILS_H
#include <algorithm>
#include <complex>
#include <iostream>
#include <mutex>
#include <numeric>
#include <omp.h>
#include <random>
#include <tuple>
#include <vector>
#include "../shared/printbuffer.h"
#include "rocfft.h"
#include <hip/hip_runtime_api.h>
// Determine the size of the data type given the precision and type.
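// e.g. single-precision complex-interleaved data occupies 2 * sizeof(float) = 8 bytes per element.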
template <typename Tsize>
inline Tsize var_size(const rocfft_precision precision, const rocfft_array_type type)
{
size_t var_size = 0;
switch(precision)
{
case rocfft_precision_single:
var_size = sizeof(float);
break;
case rocfft_precision_double:
var_size = sizeof(double);
break;
}
switch(type)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
var_size *= 2;
break;
default:
break;
}
return var_size;
}
// Container class for test parameters.
class rocfft_params
{
public:
// All parameters are row-major.
std::vector<size_t> length;
std::vector<size_t> istride;
std::vector<size_t> ostride;
size_t nbatch = 1;
rocfft_precision precision = rocfft_precision_double;
rocfft_transform_type transform_type = rocfft_transform_type_complex_forward;
rocfft_result_placement placement = rocfft_placement_inplace;
size_t idist = 0;
size_t odist = 0;
rocfft_array_type itype = rocfft_array_type_complex_interleaved;
rocfft_array_type otype = rocfft_array_type_complex_interleaved;
std::vector<size_t> ioffset = {0, 0};
std::vector<size_t> ooffset = {0, 0};
std::vector<size_t> isize;
std::vector<size_t> osize;
// Given an array type, return the name as a string.
std::string array_type_name(const rocfft_array_type type) const
{
switch(type)
{
case rocfft_array_type_complex_interleaved:
return "rocfft_array_type_complex_interleaved";
case rocfft_array_type_complex_planar:
return "rocfft_array_type_complex_planar";
case rocfft_array_type_real:
return "rocfft_array_type_real";
case rocfft_array_type_hermitian_interleaved:
return "rocfft_array_type_hermitian_interleaved";
case rocfft_array_type_hermitian_planar:
return "rocfft_array_type_hermitian_planar";
case rocfft_array_type_unset:
return "rocfft_array_type_unset";
}
return "";
}
// Convert to string for output.
std::string str() const
{
std::stringstream ss;
ss << "\nparams:\n";
ss << "\tlength:";
for(auto i : length)
ss << " " << i;
ss << "\n";
ss << "\tistride:";
for(auto i : istride)
ss << " " << i;
ss << "\n";
ss << "\tidist: " << idist << "\n";
ss << "\tostride:";
for(auto i : ostride)
ss << " " << i;
ss << "\n";
ss << "\todist: " << odist << "\n";
ss << "\tbatch: " << nbatch << "\n";
ss << "\tisize:";
for(auto i : isize)
ss << " " << i;
ss << "\n";
ss << "\tosize:";
for(auto i : osize)
ss << " " << i;
ss << "\n";
ss << "\tioffset:";
for(auto i : ioffset)
ss << " " << i;
ss << "\n";
ss << "\tooffset:";
for(auto i : ooffset)
ss << " " << i;
ss << "\n";
if(placement == rocfft_placement_inplace)
ss << "\tin-place\n";
else
ss << "\tout-of-place\n";
ss << "\t" << array_type_name(itype) << " -> " << array_type_name(otype) << "\n";
if(precision == rocfft_precision_single)
ss << "\tsingle-precision\n";
else
ss << "\tdouble-precision\n";
ss << "\tilength:";
for(const auto i : ilength())
ss << " " << i;
ss << "\n";
ss << "\tolength:";
for(const auto i : olength())
ss << " " << i;
ss << "\n";
return ss.str();
}
// Dimension of the transform.
size_t dim() const
{
return length.size();
}
std::vector<size_t> ilength() const
{
auto ilength = length;
if(transform_type == rocfft_transform_type_real_inverse)
ilength[dim() - 1] = ilength[dim() - 1] / 2 + 1;
return ilength;
}
std::vector<size_t> olength() const
{
auto olength = length;
if(transform_type == rocfft_transform_type_real_forward)
olength[dim() - 1] = olength[dim() - 1] / 2 + 1;
return olength;
}
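// Number of separate data buffers required by an array type:
// 1 for real/interleaved formats, 2 for planar formats.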
size_t nbuffer(const rocfft_array_type type) const
{
switch(type)
{
case rocfft_array_type_real:
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
return 1;
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
return 2;
case rocfft_array_type_unset:
return 0;
}
}
// Number of input buffers
size_t nibuffer() const
{
return nbuffer(itype);
}
// Number of output buffers
size_t nobuffer() const
{
return nbuffer(otype);
}
size_t compute_isize() const
{
auto il = ilength();
size_t val = nbatch * idist;
for(int i = 0; i < il.size(); ++i)
{
val = std::max(val, il[i] * istride[i]);
}
return val;
}
size_t compute_osize() const
{
auto ol = olength();
size_t val = nbatch * odist;
for(int i = 0; i < ol.size(); ++i)
{
val = std::max(val, ol[i] * ostride[i]);
}
return val;
}
std::vector<size_t> ibuffer_sizes() const
{
std::vector<size_t> ibuffer_sizes;
if(isize.empty())
return ibuffer_sizes;
switch(itype)
{
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
ibuffer_sizes.resize(2);
break;
default:
ibuffer_sizes.resize(1);
}
for(unsigned i = 0; i < ibuffer_sizes.size(); i++)
{
ibuffer_sizes[i] = isize[i] * var_size<size_t>(precision, itype);
}
return ibuffer_sizes;
}
std::vector<size_t> obuffer_sizes() const
{
std::vector<size_t> obuffer_sizes;
if(osize.empty())
return obuffer_sizes;
switch(otype)
{
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
obuffer_sizes.resize(2);
break;
default:
obuffer_sizes.resize(1);
}
for(unsigned i = 0; i < obuffer_sizes.size(); i++)
{
obuffer_sizes[i] = osize[i] * var_size<size_t>(precision, otype);
}
return obuffer_sizes;
}
// Estimate the amount of host memory needed.
size_t needed_ram(const int verbose) const
{
// Host input, output, and input copy: 3 buffers, all contiguous.
size_t needed_ram
= 3 * std::accumulate(length.begin(), length.end(), 1, std::multiplies<size_t>());
// GPU input buffer:
needed_ram += std::inner_product(length.begin(), length.end(), istride.begin(), 0);
// GPU output buffer:
needed_ram += std::inner_product(length.begin(), length.end(), ostride.begin(), 0);
// Account for precision and data type:
if(transform_type != rocfft_transform_type_real_forward
&& transform_type != rocfft_transform_type_real_inverse)
{
needed_ram *= 2;
}
switch(precision)
{
case rocfft_precision_single:
needed_ram *= 4;
break;
case rocfft_precision_double:
needed_ram *= 8;
break;
}
needed_ram *= nbatch;
if(verbose > 1)
{
std::cout << "required host memory (GB): " << needed_ram * 1e-9 << std::endl;
}
return needed_ram;
}
// Column-major getters:
std::vector<size_t> ilength_cm() const
{
auto ilength_cm = ilength();
std::reverse(std::begin(ilength_cm), std::end(ilength_cm));
return ilength_cm;
}
std::vector<size_t> olength_cm() const
{
auto olength_cm = olength();
std::reverse(std::begin(olength_cm), std::end(olength_cm));
return olength_cm;
}
std::vector<size_t> length_cm() const
{
auto length_cm = length;
std::reverse(std::begin(length_cm), std::end(length_cm));
return length_cm;
}
std::vector<size_t> istride_cm() const
{
auto istride_cm = istride;
std::reverse(std::begin(istride_cm), std::end(istride_cm));
return istride_cm;
}
std::vector<size_t> ostride_cm() const
{
auto ostride_cm = ostride;
std::reverse(std::begin(ostride_cm), std::end(ostride_cm));
return ostride_cm;
}
// Return true if the given GPU parameters would produce a valid transform.
bool valid(const int verbose) const
{
if(ioffset.size() < nibuffer() || ooffset.size() < nobuffer())
return false;
// Check that in-place transforms have the same input and output stride:
if(placement == rocfft_placement_inplace)
{
const auto stridesize = std::min(istride.size(), ostride.size());
bool samestride = true;
for(int i = 0; i < stridesize; ++i)
{
if(istride[i] != ostride[i])
samestride = false;
}
if(!samestride)
{
// In-place transforms require identical input and output strides.
if(verbose)
{
std::cout << "istride:";
for(const auto& i : istride)
std::cout << " " << i;
std::cout << " ostride0:";
for(const auto& i : ostride)
std::cout << " " << i;
std::cout << " differ; skipped for in-place transforms: skipping test"
<< std::endl;
}
// TODO: mark skipped
return false;
}
if((transform_type == rocfft_transform_type_real_forward
|| transform_type == rocfft_transform_type_real_inverse)
&& (istride[0] != 1 || ostride[0] != 1))
{
// In-place real/complex transforms require unit strides.
if(verbose)
{
std::cout
<< "istride[0]: " << istride[0] << " ostride[0]: " << ostride[0]
<< " must be unitary for in-place real/complex transforms: skipping test"
<< std::endl;
}
return false;
}
if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved))
{
if(verbose)
{
std::cout << "In-place c2c transforms require identical io types; skipped.\n";
}
return false;
}
// Check offsets
switch(transform_type)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
for(int i = 0; i < nibuffer(); ++i)
{
if(ioffset[i] != ooffset[i])
return false;
}
break;
case rocfft_transform_type_real_forward:
if(ioffset[0] != 2 * ooffset[0])
return false;
break;
case rocfft_transform_type_real_inverse:
if(2 * ioffset[0] != ooffset[0])
return false;
break;
}
}
// The parameters are valid.
return true;
}
};
// This is used with the program_options class so that the user can type an integer on the
// command line and we store it into an enum variable
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_array_type& atype)
{
unsigned tmp;
stream >> tmp;
atype = rocfft_array_type(tmp);
return stream;
}
// similarly for transform type
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_transform_type& ttype)
{
unsigned tmp;
stream >> tmp;
ttype = rocfft_transform_type(tmp);
return stream;
}
// count the number of total iterations for 1-, 2-, and 3-D dimensions
template <typename T1>
size_t count_iters(const T1& i)
{
return i;
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1>& i)
{
return std::get<0>(i) * std::get<1>(i);
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1, T1>& i)
{
return std::get<0>(i) * std::get<1>(i) * std::get<2>(i);
}
// Work out how many partitions to break our iteration problem into
template <typename T1>
static size_t compute_partition_count(T1 length)
{
#ifdef BUILD_CLIENTS_TESTS_OPENMP
// we seem to get contention from too many threads, which slows
// things down. particularly noticeable with mix_3D tests
static const size_t MAX_PARTITIONS = 8;
size_t iters = count_iters(length);
size_t hw_threads = std::min(MAX_PARTITIONS, static_cast<size_t>(omp_get_num_procs()));
if(!hw_threads)
return 1;
// don't bother threading problem sizes that are too small. pick
// an arbitrary number of iterations and ensure that each thread
// has at least that many iterations to process
static const size_t MIN_ITERS_PER_THREAD = 2048;
// either use the whole CPU, or use ceil(iters/iters_per_thread)
return std::min(hw_threads, (iters + MIN_ITERS_PER_THREAD + 1) / MIN_ITERS_PER_THREAD);
#else
return 1;
#endif
}
// Break a scalar length into some number of pieces, returning
// [(start0, end0), (start1, end1), ...]
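// e.g. partition_base(10, 3) returns {(0,3), (3,6), (6,10)}: equal chunks of
// length/num_parts, with the last chunk absorbing the remainder.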
template <typename T1>
std::vector<std::pair<T1, T1>> partition_base(const T1& length, size_t num_parts)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
// make sure we don't exceed the length
num_parts = std::min(length, num_parts);
std::vector<std::pair<T1, T1>> ret(num_parts);
auto partition_size = length / num_parts;
T1 cur_partition = 0;
for(size_t i = 0; i < num_parts; ++i, cur_partition += partition_size)
{
ret[i].first = cur_partition;
ret[i].second = cur_partition + partition_size;
}
// last partition might not divide evenly, fix it up
ret.back().second = length;
return ret;
}
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_rowmajor(const T1& length)
{
return partition_base(length, compute_partition_count(length));
}
// Partition on the leftmost part of the tuple, for row-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
partition_rowmajor(const std::tuple<T1, T1>& length)
{
auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<0>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<0>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
}
return ret;
}
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
partition_rowmajor(const std::tuple<T1, T1, T1>& length)
{
auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<0>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<2>(ret[i].first) = 0;
std::get<0>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
std::get<2>(ret[i].second) = std::get<2>(length);
}
return ret;
}
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_colmajor(const T1& length)
{
return partition_base(length, compute_partition_count(length));
}
// Partition on the rightmost part of the tuple, for col-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
partition_colmajor(const std::tuple<T1, T1>& length)
{
auto partitions = partition_base(std::get<1>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<1>(ret[i].first) = partitions[i].first;
std::get<0>(ret[i].first) = 0;
std::get<1>(ret[i].second) = partitions[i].second;
std::get<0>(ret[i].second) = std::get<0>(length);
}
return ret;
}
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
partition_colmajor(const std::tuple<T1, T1, T1>& length)
{
auto partitions = partition_base(std::get<2>(length), compute_partition_count(length));
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
for(size_t i = 0; i < partitions.size(); ++i)
{
std::get<2>(ret[i].first) = partitions[i].first;
std::get<1>(ret[i].first) = 0;
std::get<0>(ret[i].first) = 0;
std::get<2>(ret[i].second) = partitions[i].second;
std::get<1>(ret[i].second) = std::get<1>(length);
std::get<0>(ret[i].second) = std::get<0>(length);
}
return ret;
}
// Specialized computation of index given 1-, 2-, 3- dimension length + stride
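// e.g. for the 2D case this is the row-major linear index i0*s0 + i1*s1 + base.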
template <typename T1, typename T2>
int compute_index(T1 length, T2 stride, size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (length * stride) + base;
}
template <typename T1, typename T2>
int compute_index(const std::tuple<T1, T1>& length, const std::tuple<T2, T2>& stride, size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
+ base;
}
template <typename T1, typename T2>
int compute_index(const std::tuple<T1, T1, T1>& length,
const std::tuple<T2, T2, T2>& stride,
size_t base)
{
static_assert(std::is_integral<T1>::value, "Integral required.");
static_assert(std::is_integral<T2>::value, "Integral required.");
return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
+ (std::get<2>(length) * std::get<2>(stride)) + base;
}
// Given a length vector, set the rest of the strides.
// The optional argument stride0 sets the stride for the contiguous dimension.
// The optional rcpadding argument sets the stride correctly for in-place
// multi-dimensional real/complex transforms.
// Format is row-major.
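// e.g. length = {2, 3, 4} gives stride = {12, 4, 1}; with rcpadding = true the
// innermost length is padded to 2*(4/2+1) = 6, giving stride = {18, 6, 1}.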
template <typename T1>
inline std::vector<T1> compute_stride(const std::vector<T1>& length,
const std::vector<size_t>& stride0 = std::vector<size_t>(),
const bool rcpadding = false)
{
// We can't have more strides than dimensions:
assert(stride0.size() <= length.size());
const int dim = length.size();
std::vector<T1> stride(dim);
int dimoffset = 0;
if(stride0.size() == 0)
{
// Set the contiguous stride:
stride[dim - 1] = 1;
dimoffset = 1;
}
else
{
// Copy the input values to the end of the stride array:
for(int i = 0; i < stride0.size(); ++i)
{
stride[dim - stride0.size() + i] = stride0[i];
}
}
if(stride0.size() < dim)
{
// Compute any remaining values via recursion.
for(int i = dim - dimoffset - stride0.size(); i-- > 0;)
{
auto lengthip1 = length[i + 1];
if(rcpadding && i == dim - 2)
{
lengthip1 = 2 * (lengthip1 / 2 + 1);
}
stride[i] = stride[i + 1] * lengthip1;
}
}
return stride;
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input and output
// types are identical.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to1(const Tval* input,
Tval* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist && ioffset == ooffset;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx] = input[idx];
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// planar and the output type is complex interleaved.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_2to1(const Tval* input0,
const Tval* input1,
std::complex<Tval>* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist && ioffset == ooffset;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx] = std::complex<Tval>(input0[idx], input1[idx]);
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// complex interleaved and the output type is planar.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to2(const std::complex<Tval>* input,
Tval* output0,
Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
const bool idx_equals_odx = istride == ostride && idist == odist && ioffset == ooffset;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output0[odx] = input[idx].real();
output1[odx] = input[idx].imag();
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type given
// by itype, and the output type is given by otype.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const Tint2& istride,
const size_t idist,
const rocfft_array_type otype,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
if(itype == otype)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
for(int idx = 0; idx < input.size(); ++idx)
{
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
break;
default:
throw std::runtime_error("Invalid data type");
break;
}
}
else if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_hermitian_interleaved
&& otype == rocfft_array_type_hermitian_planar))
{
// copy 1to2
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<float*>(output[0].data()),
reinterpret_cast<float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<double*>(output[0].data()),
reinterpret_cast<double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else if((itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved)
|| (itype == rocfft_array_type_hermitian_planar
&& otype == rocfft_array_type_hermitian_interleaved))
{
// copy 2 to 1
switch(precision)
{
case rocfft_precision_single:
copy_buffers_2to1(reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_2to1(reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
}
// unroll arbitrary-dimension copy_buffers into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const rocfft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return copy_buffers(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
ioffset,
ooffset);
case 2:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
ioffset,
ooffset);
case 3:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
ioffset,
ooffset);
default:
abort();
}
}
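// Hedged usage sketch (not part of the original file): copying a 2-D
// complex-interleaved batch between two host buffers with identical layouts.
// in_bufs and out_bufs are illustrative placeholders for
// std::vector<std::vector<char>> allocations of sufficient size.
//
//   std::vector<size_t> length{8, 4}, istride{4, 1}, ostride{4, 1};
//   std::vector<size_t> ioffset{0}, ooffset{0};
//   copy_buffers(in_bufs, out_bufs, length, /*nbatch=*/2,
//                rocfft_precision_single,
//                rocfft_array_type_complex_interleaved, istride, /*idist=*/32,
//                rocfft_array_type_complex_interleaved, ostride, /*odist=*/32,
//                ioffset, ooffset);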
// Compute the L-infinity and L-2 distance between a buffer with strides istride and
// distance idist between batches and a buffer with strides ostride and distance odist
// between batches. Both buffers are of complex type.
struct VectorNorms
{
double l_2 = 0.0, l_inf = 0.0;
};
template <typename Tcomplex, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_complex(const Tcomplex* input,
const Tcomplex* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist && ioffset == ooffset;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_colmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base) + ioffset[0];
const int odx
= idx_equals_odx ? idx : compute_index(index, ostride, odx_base) + ooffset[0];
const double rdiff = std::abs(output[odx].real() - input[idx].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
const double idiff = std::abs(output[odx].imag() - input[idx].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between a buffer with strides istride and
// distance idist between batches and a buffer with strides ostride and distance odist
// between batches. Both buffers are of real type.
template <typename Tfloat, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_real(const Tfloat* input,
const Tfloat* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist && ioffset == ooffset;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base) + ioffset[0];
const int odx
= idx_equals_odx ? idx : compute_index(index, ostride, odx_base) + ooffset[0];
const double diff = std::abs(output[odx] - input[idx]);
cur_linf = std::max(diff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += diff * diff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between a buffer with strides istride and
// distance idist between batches and a buffer with strides ostride and distance odist
// between batches. The input is complex-interleaved; the output is complex-planar.
template <typename Tval, typename Tint1, typename T2, typename T3>
inline VectorNorms distance_1to2(const std::complex<Tval>* input,
const Tval* output0,
const Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const T3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
std::mutex linf_failure_lock;
const bool idx_equals_odx = istride == ostride && idist == odist && ioffset == ooffset;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base) + ioffset[0];
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
const double rdiff = std::abs(output0[odx + ooffset[0]] - input[idx].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
const double idiff = std::abs(output1[odx + ooffset[1]] - input[idx].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers of dimension length and
// with types given by itype, otype, and precision.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const Tint2& istride,
const size_t idist,
const rocfft_array_type otype,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
VectorNorms dist;
if(itype == otype)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
dist = distance_1to1_complex(
reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<const std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
dist = distance_1to1_complex(
reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<const std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
for(int idx = 0; idx < input.size(); ++idx)
{
VectorNorms d;
switch(precision)
{
case rocfft_precision_single:
d = distance_1to1_real(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<const float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
d = distance_1to1_real(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<const double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_inf = std::max(d.l_inf, dist.l_inf);
dist.l_2 += d.l_2 * d.l_2;
}
break;
default:
throw std::runtime_error("Invalid input and output types.");
break;
}
}
else if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_hermitian_interleaved
&& otype == rocfft_array_type_hermitian_planar))
{
switch(precision)
{
case rocfft_precision_single:
dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<const float*>(output[0].data()),
reinterpret_cast<const float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<const double*>(output[0].data()),
reinterpret_cast<const double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
}
else if((itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved)
|| (itype == rocfft_array_type_hermitian_planar
&& otype == rocfft_array_type_hermitian_interleaved))
{
switch(precision)
{
case rocfft_precision_single:
dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(output[0].data()),
reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
length,
nbatch,
ostride,
odist,
istride,
idist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
case rocfft_precision_double:
dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(output[0].data()),
reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
length,
nbatch,
ostride,
odist,
istride,
idist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
break;
}
dist.l_2 *= dist.l_2;
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
dist.l_2 = sqrt(dist.l_2);
return dist;
}
// Unroll arbitrary-dimension distance into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const rocfft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return distance(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
case 2:
return distance(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
case 3:
return distance(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
linf_failures,
linf_cutoff,
ioffset,
ooffset);
default:
abort();
}
}
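// Hedged usage sketch (illustrative only): comparing a reference buffer with a
// transform result and collecting the (batch, index) pairs of any element whose
// pointwise error exceeds the cutoff. cpu_out/gpu_out and the layout variables
// are assumed to be set up elsewhere.
//
//   std::vector<std::pair<size_t, size_t>> linf_failures;
//   VectorNorms diff = distance(cpu_out, gpu_out, length, nbatch,
//                               rocfft_precision_double,
//                               rocfft_array_type_complex_interleaved, istride, idist,
//                               rocfft_array_type_complex_interleaved, ostride, odist,
//                               linf_failures, /*linf_cutoff=*/1e-12, ioffset, ooffset);
//   // diff.l_inf is the largest pointwise error; diff.l_2 is the root-sum-square error.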
// Compute the L-infinity and L-2 norm of a buffer with strides istride and batch
// distance idist. Data is std::complex.
template <typename Tcomplex, typename T1, typename T2>
inline VectorNorms norm_complex(const Tcomplex* input,
const T1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
double linf = 0.0;
double l2 = 0.0;
size_t idx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const double rval = std::abs(input[idx].real());
cur_linf = std::max(rval, cur_linf);
cur_l2 += rval * rval;
const double ival = std::abs(input[idx].imag());
cur_linf = std::max(ival, cur_linf);
cur_l2 += ival * ival;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and batch
// distance idist. Data is real-valued.
template <typename Tfloat, typename T1, typename T2>
inline VectorNorms norm_real(const Tfloat* input,
const T1& whole_length,
const size_t nbatch,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
double linf = 0.0;
double l2 = 0.0;
size_t idx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const double val = std::abs(input[idx]);
cur_linf = std::max(val, cur_linf);
cur_l2 += val * val;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and batch
// distance idist. Data format is given by precision and itype.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
const T1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const T2& istride,
const size_t idist,
const std::vector<size_t>& offset)
{
VectorNorms norm;
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
norm = norm_complex(reinterpret_cast<const std::complex<float>*>(input[0].data()),
length,
nbatch,
istride,
idist,
offset);
break;
case rocfft_precision_double:
norm = norm_complex(reinterpret_cast<const std::complex<double>*>(input[0].data()),
length,
nbatch,
istride,
idist,
offset);
break;
}
norm.l_2 *= norm.l_2;
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
for(int idx = 0; idx < input.size(); ++idx)
{
VectorNorms n;
switch(precision)
{
case rocfft_precision_single:
n = norm_real(reinterpret_cast<const float*>(input[idx].data()),
length,
nbatch,
istride,
idist,
offset);
break;
case rocfft_precision_double:
n = norm_real(reinterpret_cast<const double*>(input[idx].data()),
length,
nbatch,
istride,
idist,
offset);
break;
}
norm.l_inf = std::max(n.l_inf, norm.l_inf);
norm.l_2 += n.l_2 * n.l_2;
}
break;
default:
throw std::runtime_error("Invalid data type");
break;
}
norm.l_2 = sqrt(norm.l_2);
return norm;
}
// Unroll arbitrary-dimension norm into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
const std::vector<T1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type type,
const std::vector<T2>& stride,
const size_t dist,
const std::vector<size_t>& offset)
{
switch(length.size())
{
case 1:
return norm(input, length[0], nbatch, precision, type, stride[0], dist, offset);
case 2:
return norm(input,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
type,
std::make_tuple(stride[0], stride[1]),
dist,
offset);
case 3:
return norm(input,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
type,
std::make_tuple(stride[0], stride[1], stride[2]),
dist,
offset);
default:
abort();
}
}
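// Hedged usage sketch: the norm of the reference data is typically used to turn
// an absolute distance into a relative error; the variable names are
// illustrative and assume the distance sketch shown earlier.
//
//   VectorNorms ref = norm(cpu_out, length, nbatch, rocfft_precision_double,
//                          rocfft_array_type_complex_interleaved, istride, idist, offset);
//   const double relative_linf = diff.l_inf / ref.l_inf;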
// Given a buffer of complex values stored in a vector of chars (or two vectors in the
// case of planar format), impose Hermitian symmetry.
// NB: length is the dimensions of the FFT, not the data layout dimensions.
template <typename Tfloat, typename Tallocator, typename Tsize>
inline void impose_hermitian_symmetry(std::vector<std::vector<char, Tallocator>>& vals,
const std::vector<Tsize>& length,
const std::vector<Tsize>& istride,
const Tsize idist,
const Tsize nbatch)
{
switch(vals.size())
{
case 1:
{
// Complex interleaved data
for(auto ibatch = 0; ibatch < nbatch; ++ibatch)
{
auto data = ((std::complex<Tfloat>*)vals[0].data()) + ibatch * idist;
switch(length.size())
{
case 3:
if(length[2] % 2 == 0)
{
data[istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[2] % 2 == 0)
{
data[istride[0] * (length[0] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[1] % 2 == 0 && length[2] % 2 == 0)
{
data[istride[1] * (length[1] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[1] % 2 == 0 && length[2] % 2 == 0)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)
+ istride[2] * (length[2] / 2)]
.imag(0.0);
// clang format on
}
// y-axis:
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
data[istride[1] * (length[1] - j)] = std::conj(data[istride[1] * j]);
}
if(length[0] % 2 == 0)
{
// y-axis at x-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)]
= std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j]);
// clang format on
}
}
// x-axis:
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
}
if(length[1] % 2 == 0)
{
// x-axis at y-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
// clang format on
}
}
// x-y plane:
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
for(auto j = 1; j < length[1]; ++j)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)]
= std::conj(data[istride[0] * i + istride[1] * j]);
// clang format on
}
}
if(length[2] % 2 == 0)
{
// x-axis at z-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[2] * (length[2] / 2)]);
}
if(length[1] % 2 == 0)
{
// x-axis at yz-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)
+ istride[2] * (length[2] / 2)]);
}
}
// y-axis: at z-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
data[istride[1] * (length[1] - j) + istride[2] * (length[2] / 2)]
= std::conj(data[istride[1] * j + istride[2] * (length[2] / 2)]);
}
if(length[0] % 2 == 0)
{
// y-axis: at xz-nyquist
for(auto j = 1; j < (length[1] + 1) / 2; ++j)
{
// clang format off
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j
+ istride[2] * (length[2] / 2)]);
// clang format on
}
}
// x-y plane: at z-nyquist
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
for(auto j = 1; j < length[1]; ++j)
{
// clang format off
data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)
+ istride[2] * (length[2] / 2)]
= std::conj(data[istride[0] * i + istride[1] * j
+ istride[2] * (length[2] / 2)]);
// clang format on
}
}
}
// fall-through
case 2:
if(length[1] % 2 == 0)
{
data[istride[1] * (length[1] / 2)].imag(0.0);
}
if(length[0] % 2 == 0 && length[1] % 2 == 0)
{
data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)].imag(0.0);
}
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
}
if(length[1] % 2 == 0)
{
for(auto i = 1; i < (length[0] + 1) / 2; ++i)
{
data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
= std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
}
}
// fall-through
case 1:
data[0].imag(0.0);
if(length[0] % 2 == 0)
{
data[istride[0] * (length[0] / 2)].imag(0.0);
}
break;
default:
throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
break;
}
}
break;
}
case 2:
{
// Complex planar data
for(auto ibatch = 0; ibatch < nbatch; ++ibatch)
{
auto idata = ((Tfloat*)vals[1].data()) + ibatch * idist;
switch(length.size())
{
case 3:
throw std::runtime_error("Not implemented");
// FIXME: implement
case 2:
throw std::runtime_error("Not implemented");
// FIXME: implement
case 1:
idata[0] = 0.0;
if(length[0] % 2 == 0)
{
idata[istride[0] * (length[0] / 2)] = 0.0;
}
break;
default:
throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
break;
}
}
break;
}
default:
throw std::runtime_error("Invalid data type");
break;
}
}
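// Worked 1-D example of the symmetry imposed above: for a real transform of
// length N only bins 0..N/2 are stored, and the code forces bin 0 (and bin N/2
// when N is even) to be purely real, since X[0] = conj(X[0]) and
// X[N/2] = conj(X[N/2]) for a real-valued signal. The higher-dimensional cases
// apply the same Nyquist constraints plus the conjugate pairing
// data[L - i] = conj(data[i]) along the remaining axes, as in the loops above.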
// Given an array type and transform length, strides, etc, load random floats in [0,1]
// into the input array of floats/doubles or complex floats/doubles, which is stored in a
// vector of chars (or two vectors in the case of planar format).
// Lengths are the memory lengths (i.e. not the transform parameters).
template <typename Tfloat, typename Tallocator, typename Tint1>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
const rocfft_array_type itype,
const Tint1& whole_length,
const Tint1& istride,
const size_t idist,
const size_t nbatch)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
{
auto idata = (std::complex<Tfloat>*)input[0].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(auto b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const int i = compute_index(index, istride, i_base);
const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(),
(Tfloat)gen() / (Tfloat)gen.max());
idata[i] = val;
} while(increment_rowmajor(index, length));
}
}
break;
}
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
{
auto ireal = (Tfloat*)input[0].data();
auto iimag = (Tfloat*)input[1].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(auto b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const int i = compute_index(index, istride, i_base);
const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(),
(Tfloat)gen() / (Tfloat)gen.max());
ireal[i] = val.real();
iimag[i] = val.imag();
} while(increment_rowmajor(index, length));
}
}
break;
}
case rocfft_array_type_real:
{
auto idata = (Tfloat*)input[0].data();
size_t i_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(auto b = 0; b < nbatch; b++, i_base += idist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
std::mt19937 gen(compute_index(index, istride, i_base));
do
{
const int i = compute_index(index, istride, i_base);
const Tfloat val = (Tfloat)gen() / (Tfloat)gen.max();
idata[i] = val;
} while(increment_rowmajor(index, length));
}
}
break;
}
default:
throw std::runtime_error("Input layout format not yet supported");
break;
}
}
// unroll set_input for dimension 1, 2, 3
template <typename Tfloat, typename Tallocator>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
const rocfft_array_type itype,
const std::vector<size_t>& length,
const std::vector<size_t>& istride,
const size_t idist,
const size_t nbatch)
{
switch(length.size())
{
case 1:
set_input<Tfloat>(input, itype, length[0], istride[0], idist, nbatch);
break;
case 2:
set_input<Tfloat>(input,
itype,
std::make_tuple(length[0], length[1]),
std::make_tuple(istride[0], istride[1]),
idist,
nbatch);
break;
case 3:
set_input<Tfloat>(input,
itype,
std::make_tuple(length[0], length[1], length[2]),
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
nbatch);
break;
default:
abort();
}
}
// Compute the idist for a given transform based on the placeness, transform type, and
// data layout.
template <typename Tsize>
inline size_t set_idist(const rocfft_result_placement place,
const rocfft_transform_type transformType,
const std::vector<Tsize>& length,
const std::vector<Tsize>& istride)
{
const Tsize dim = length.size();
// In-place 1D transforms need extra dist.
if(transformType == rocfft_transform_type_real_forward && dim == 1
&& place == rocfft_placement_inplace)
{
return 2 * (length[0] / 2 + 1) * istride[0];
}
if(transformType == rocfft_transform_type_real_inverse && dim == 1)
{
return (length[0] / 2 + 1) * istride[0];
}
Tsize idist = (transformType == rocfft_transform_type_real_inverse)
? (length[dim - 1] / 2 + 1) * istride[dim - 1]
: length[dim - 1] * istride[dim - 1];
for(int i = 0; i < dim - 1; ++i)
{
idist = std::max(length[i] * istride[i], idist);
}
return idist;
}
// Compute the odist for a given transform based on the placeness, transform type, and
// data layout. Row-major.
template <typename Tsize>
inline size_t set_odist(const rocfft_result_placement place,
const rocfft_transform_type transformType,
const std::vector<Tsize>& length,
const std::vector<Tsize>& ostride)
{
const Tsize dim = length.size();
// In-place 1D transforms need extra dist.
if(transformType == rocfft_transform_type_real_inverse && dim == 1
&& place == rocfft_placement_inplace)
{
return 2 * (length[0] / 2 + 1) * ostride[0];
}
if(transformType == rocfft_transform_type_real_forward && dim == 1)
{
return (length[0] / 2 + 1) * ostride[0];
}
Tsize odist = (transformType == rocfft_transform_type_real_forward)
? (length[dim - 1] / 2 + 1) * ostride[dim - 1]
: length[dim - 1] * ostride[dim - 1];
for(int i = 0; i < dim - 1; ++i)
{
odist = std::max(length[i] * ostride[i], odist);
}
return odist;
}
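// Worked example for the two helpers above (values follow directly from the
// formulas): a 1-D real forward transform of length 8 with unit strides gives
// idist = 8 out-of-place, or 2 * (8/2 + 1) = 10 in-place (padding for the
// in-place real/complex layout), and odist = 8/2 + 1 = 5 complex outputs.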
// Given a precision, an array type, and the number of elements in each buffer,
// allocate the required host buffer(s).
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> allocate_host_buffer(
const rocfft_precision precision, const rocfft_array_type type, const std::vector<size_t>& size)
{
std::vector<std::vector<char, Allocator>> buffers(size.size());
for(int i = 0; i < size.size(); ++i)
{
buffers[i].resize(size[i] * var_size<size_t>(precision, type));
}
return buffers;
}
// Given a data type and dimensions, fill the buffer, imposing Hermitian symmetry if
// necessary.
// NB: length is the logical size of the FFT, and not necessarily the data dimensions
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> compute_input(const rocfft_params& params)
{
auto input = allocate_host_buffer<Allocator>(params.precision, params.itype, params.isize);
for(auto& i : input)
{
std::fill(i.begin(), i.end(), 0.0);
}
switch(params.precision)
{
case rocfft_precision_double:
set_input<double>(
input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
break;
case rocfft_precision_single:
set_input<float>(
input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
break;
}
if(params.itype == rocfft_array_type_hermitian_interleaved
|| params.itype == rocfft_array_type_hermitian_planar)
{
switch(params.precision)
{
case rocfft_precision_double:
impose_hermitian_symmetry<double>(
input, params.length, params.istride, params.idist, params.nbatch);
break;
case rocfft_precision_single:
impose_hermitian_symmetry<float>(
input, params.length, params.istride, params.idist, params.nbatch);
break;
}
}
return input;
}
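// Hedged usage sketch: generating the host-side input for a transform described
// by a filled-in rocfft_params object (the params variable is assumed to exist
// and to have itype/isize/istride/idist/nbatch already set).
//
//   auto host_input = compute_input(params);
//   // host_input[0] (and host_input[1] for planar layouts) can now be copied
//   // to the device buffers before running the FFT.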
// Check that the input and output types are consistent.
inline void check_iotypes(const rocfft_result_placement place,
const rocfft_transform_type transformType,
const rocfft_array_type itype,
const rocfft_array_type otype)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
case rocfft_array_type_real:
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
switch(otype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
case rocfft_array_type_real:
break;
default:
throw std::runtime_error("Invalid Output array type format");
}
// Check that format choices are supported
if(transformType != rocfft_transform_type_real_forward
&& transformType != rocfft_transform_type_real_inverse)
{
if(place == rocfft_placement_inplace && itype != otype)
{
throw std::runtime_error(
"In-place transforms must have identical input and output types");
}
}
bool okformat = true;
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
okformat = (otype == rocfft_array_type_complex_interleaved
|| otype == rocfft_array_type_complex_planar);
break;
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
okformat = otype == rocfft_array_type_real;
break;
case rocfft_array_type_real:
okformat = (otype == rocfft_array_type_hermitian_interleaved
|| otype == rocfft_array_type_hermitian_planar);
break;
default:
throw std::runtime_error("Invalid Input array type format");
}
switch(otype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_interleaved:
case rocfft_array_type_hermitian_planar:
case rocfft_array_type_real:
break;
default:
okformat = false;
}
if(!okformat)
{
throw std::runtime_error("Invalid combination of Input/Output array type formats");
}
}
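// Summary of the layout pairs accepted above (anything else throws):
// complex_interleaved or complex_planar input pairs with complex_interleaved or
// complex_planar output, hermitian_interleaved or hermitian_planar input pairs
// with real output, and real input pairs with hermitian_interleaved or
// hermitian_planar output; in-place complex transforms additionally require
// itype == otype.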
// Check that the input and output types are consistent. If they are unset, assign
// default values based on the transform type.
inline void check_set_iotypes(const rocfft_result_placement place,
const rocfft_transform_type transformType,
rocfft_array_type& itype,
rocfft_array_type& otype)
{
if(itype == rocfft_array_type_unset)
{
switch(transformType)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
itype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
itype = rocfft_array_type_real;
break;
case rocfft_transform_type_real_inverse:
itype = rocfft_array_type_hermitian_interleaved;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
if(otype == rocfft_array_type_unset)
{
switch(transformType)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
otype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
otype = rocfft_array_type_hermitian_interleaved;
break;
case rocfft_transform_type_real_inverse:
otype = rocfft_array_type_real;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
check_iotypes(place, transformType, itype, otype);
}
#endif
|
pr26943-1.c | /* PR c++/26943 */
/* { dg-do run } */
extern void abort (void);
extern void omp_set_dynamic (int);
int n = 6;
int
main (void)
{
int i, x = 0;
omp_set_dynamic (0);
#pragma omp parallel for num_threads (16) firstprivate (n) lastprivate (n) \
schedule (static, 1) reduction (+: x)
for (i = 0; i < 16; i++)
{
if (n != 6)
++x;
n = i;
}
if (x || n != 15)
abort ();
return 0;
}
|
GB_binop__isgt_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint16)
// A*D function (colscale): GB (_AxD__isgt_uint16)
// D*A function (rowscale): GB (_DxB__isgt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint16)
// C=scalar+B GB (_bind1st__isgt_uint16)
// C=scalar+B' GB (_bind1st_tran__isgt_uint16)
// C=A+scalar GB (_bind2nd__isgt_uint16)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isgt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
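// Worked example for the bind1st kernel above: with x = 5 and Bx = {3, 7}, the
// ISGT operator stores Cx = {1, 0}, since (5 > 3) and (5 > 7) are converted to
// uint16_t 1 and 0 respectively; entries whose bitmap bit Bb[p] is zero are
// skipped.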
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
semi_restrict.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
typedef struct
{
hypre_StructMatrix *R;
HYPRE_Int R_stored_as_transpose;
hypre_ComputePkg *compute_pkg;
hypre_Index cindex;
hypre_Index stride;
HYPRE_Int time_index;
} hypre_SemiRestrictData;
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
void *
hypre_SemiRestrictCreate( )
{
hypre_SemiRestrictData *restrict_data;
restrict_data = hypre_CTAlloc(hypre_SemiRestrictData, 1);
(restrict_data -> time_index) = hypre_InitializeTiming("SemiRestrict");
return (void *) restrict_data;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SemiRestrictSetup( void *restrict_vdata,
hypre_StructMatrix *R,
HYPRE_Int R_stored_as_transpose,
hypre_StructVector *r,
hypre_StructVector *rc,
hypre_Index cindex,
hypre_Index findex,
hypre_Index stride )
{
hypre_SemiRestrictData *restrict_data = (hypre_SemiRestrictData *)restrict_vdata;
hypre_StructGrid *grid;
hypre_StructStencil *stencil;
hypre_ComputeInfo *compute_info;
hypre_ComputePkg *compute_pkg;
/*----------------------------------------------------------
* Set up the compute package
*----------------------------------------------------------*/
grid = hypre_StructVectorGrid(r);
stencil = hypre_StructMatrixStencil(R);
hypre_CreateComputeInfo(grid, stencil, &compute_info);
hypre_ComputeInfoProjectSend(compute_info, findex, stride);
hypre_ComputeInfoProjectRecv(compute_info, findex, stride);
hypre_ComputeInfoProjectComp(compute_info, cindex, stride);
hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(r), 1,
grid, &compute_pkg);
/*----------------------------------------------------------
* Set up the restrict data structure
*----------------------------------------------------------*/
(restrict_data -> R) = hypre_StructMatrixRef(R);
(restrict_data -> R_stored_as_transpose) = R_stored_as_transpose;
(restrict_data -> compute_pkg) = compute_pkg;
hypre_CopyIndex(cindex ,(restrict_data -> cindex));
hypre_CopyIndex(stride ,(restrict_data -> stride));
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SemiRestrict( void *restrict_vdata,
hypre_StructMatrix *R,
hypre_StructVector *r,
hypre_StructVector *rc )
{
hypre_SemiRestrictData *restrict_data = (hypre_SemiRestrictData *)restrict_vdata;
HYPRE_Int R_stored_as_transpose;
hypre_ComputePkg *compute_pkg;
hypre_IndexRef cindex;
hypre_IndexRef stride;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *R_dbox;
hypre_Box *r_dbox;
hypre_Box *rc_dbox;
HYPRE_Int Ri;
HYPRE_Int ri;
HYPRE_Int rci;
HYPRE_Int constant_coefficient;
HYPRE_Real *Rp0, *Rp1;
HYPRE_Real *rp, *rp0, *rp1;
HYPRE_Real *rcp;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_Index startc;
hypre_Index stridec;
hypre_StructStencil *stencil;
hypre_Index *stencil_shape;
HYPRE_Int compute_i, fi, ci, j;
/*-----------------------------------------------------------------------
* Initialize some things.
*-----------------------------------------------------------------------*/
hypre_BeginTiming(restrict_data -> time_index);
R_stored_as_transpose = (restrict_data -> R_stored_as_transpose);
compute_pkg = (restrict_data -> compute_pkg);
cindex = (restrict_data -> cindex);
stride = (restrict_data -> stride);
stencil = hypre_StructMatrixStencil(R);
stencil_shape = hypre_StructStencilShape(stencil);
constant_coefficient = hypre_StructMatrixConstantCoefficient(R);
hypre_assert( constant_coefficient==0 || constant_coefficient==1 );
/* ... if A has constant_coefficient==2, R has constant_coefficient==0 */
if (constant_coefficient) hypre_StructVectorClearBoundGhostValues(r, 0);
hypre_SetIndex3(stridec, 1, 1, 1);
/*--------------------------------------------------------------------
* Restrict the residual.
*--------------------------------------------------------------------*/
fgrid = hypre_StructVectorGrid(r);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructVectorGrid(rc);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
rp = hypre_StructVectorData(r);
hypre_InitializeIndtComputations(compute_pkg, rp, &comm_handle);
compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
}
break;
}
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
r_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), fi);
rc_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(rc), ci);
if (R_stored_as_transpose)
{
if ( constant_coefficient )
{
Rp0 = hypre_StructMatrixBoxData(R, fi, 1) -
hypre_CCBoxOffsetDistance(R_dbox, stencil_shape[1]);
Rp1 = hypre_StructMatrixBoxData(R, fi, 0);
}
else
{
Rp0 = hypre_StructMatrixBoxData(R, fi, 1) -
hypre_BoxOffsetDistance(R_dbox, stencil_shape[1]);
Rp1 = hypre_StructMatrixBoxData(R, fi, 0);
}
}
else
{
Rp0 = hypre_StructMatrixBoxData(R, fi, 0);
Rp1 = hypre_StructMatrixBoxData(R, fi, 1);
}
rp = hypre_StructVectorBoxData(r, fi);
rp0 = rp + hypre_BoxOffsetDistance(r_dbox, stencil_shape[0]);
rp1 = rp + hypre_BoxOffsetDistance(r_dbox, stencil_shape[1]);
rcp = hypre_StructVectorBoxData(rc, ci);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
start = hypre_BoxIMin(compute_box);
hypre_StructMapFineToCoarse(start, cindex, stride, startc);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
if ( constant_coefficient )
{
Ri = hypre_CCBoxIndexRank( R_dbox, startc );
hypre_BoxLoop2Begin(hypre_StructMatrixNDim(R), loop_size,
r_dbox, start, stride, ri,
rc_dbox, startc, stridec, rci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,ri,rci) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(ri, rci)
{
rcp[rci] = rp[ri] + (Rp0[Ri] * rp0[ri] +
Rp1[Ri] * rp1[ri]);
}
hypre_BoxLoop2End(ri, rci);
}
else
{
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(R), loop_size,
R_dbox, startc, stridec, Ri,
r_dbox, start, stride, ri,
rc_dbox, startc, stridec, rci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ri,ri,rci) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ri, ri, rci)
{
rcp[rci] = rp[ri] + (Rp0[Ri] * rp0[ri] +
Rp1[Ri] * rp1[ri]);
}
hypre_BoxLoop3End(Ri, ri, rci);
}
}
}
}
/*-----------------------------------------------------------------------
* Return
*-----------------------------------------------------------------------*/
hypre_IncFLOPCount(4*hypre_StructVectorGlobalSize(rc));
hypre_EndTiming(restrict_data -> time_index);
return hypre_error_flag;
}
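/* Note on the core update above: for each coarse grid point, the restricted
   residual is rc = r(center) + Rp0 * r(neighbor0) + Rp1 * r(neighbor1), where
   the two neighbors are offset by stencil_shape[0] and stencil_shape[1] and the
   center carries an implicit weight of 1; when constant_coefficient is set,
   Rp0/Rp1 are read once per box (fixed index Ri) instead of per point. */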
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SemiRestrictDestroy( void *restrict_vdata )
{
hypre_SemiRestrictData *restrict_data = (hypre_SemiRestrictData *)restrict_vdata;
if (restrict_data)
{
hypre_StructMatrixDestroy(restrict_data -> R);
hypre_ComputePkgDestroy(restrict_data -> compute_pkg);
hypre_FinalizeTiming(restrict_data -> time_index);
hypre_TFree(restrict_data);
}
return hypre_error_flag;
}
|
builder.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUILDER_H_
#define BUILDER_H_
#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>
#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: BuilderBase
Author: Scott Beamer
Given arguments from the command line (cli), returns a built graph
- MakeGraph() will parse cli and obtain edgelist and call
MakeGraphFromEL(edgelist) to perform actual graph construction
- edgelist can be from file (reader) or synthetically generated (generator)
- Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/
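/*
  Usage sketch (illustrative only, not part of the original header). Assumes the
  CLApp helper from command_line.h and CSRGraph::PrintStats() from graph.h:

    CLApp cli(argc, argv, "builder example");
    if (!cli.ParseArgs())
      return -1;
    BuilderBase<int32_t> b(cli);   // benchmark.h typedefs this (w/ params) as Builder
    auto g = b.MakeGraph();        // edgelist from file/generator, then CSR build
    g.PrintStats();
*/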
template <typename NodeID_, typename DestID_ = NodeID_,
typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase
{
typedef EdgePair<NodeID_, DestID_> Edge;
typedef pvector<Edge> EdgeList;
const CLBase &cli_;
bool symmetrize_;
bool needs_weights_;
int64_t num_nodes_ = -1;
public:
explicit BuilderBase(const CLBase &cli) : cli_(cli)
{
symmetrize_ = cli_.symmetrize();
needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
}
DestID_ GetSource(EdgePair<NodeID_, NodeID_> e)
{
return e.u;
}
DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e)
{
return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
}
NodeID_ FindMaxNodeID(const EdgeList &el)
{
NodeID_ max_seen = 0;
#pragma omp parallel for reduction(max : max_seen)
for (auto it = el.begin(); it < el.end(); it++)
{
Edge e = *it;
max_seen = std::max(max_seen, e.u);
max_seen = std::max(max_seen, (NodeID_)e.v);
}
return max_seen;
}
pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose)
{
pvector<NodeID_> degrees(num_nodes_, 0);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++)
{
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
fetch_and_add(degrees[e.u], 1);
if (symmetrize_ || (!symmetrize_ && transpose))
fetch_and_add(degrees[(NodeID_)e.v], 1);
}
return degrees;
}
static pvector<SGOffset> PrefixSum(const pvector<NodeID_> °rees)
{
pvector<SGOffset> sums(degrees.size() + 1);
SGOffset total = 0;
for (size_t n = 0; n < degrees.size(); n++)
{
sums[n] = total;
total += degrees[n];
}
sums[degrees.size()] = total;
return sums;
}
static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> °rees)
{
const size_t block_size = 1 << 20;
const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
pvector<SGOffset> local_sums(num_blocks);
#pragma omp parallel for
for (size_t block = 0; block < num_blocks; block++)
{
SGOffset lsum = 0;
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i = block * block_size; i < block_end; i++)
lsum += degrees[i];
local_sums[block] = lsum;
}
pvector<SGOffset> bulk_prefix(num_blocks + 1);
SGOffset total = 0;
for (size_t block = 0; block < num_blocks; block++)
{
bulk_prefix[block] = total;
total += local_sums[block];
}
bulk_prefix[num_blocks] = total;
pvector<SGOffset> prefix(degrees.size() + 1);
#pragma omp parallel for
for (size_t block = 0; block < num_blocks; block++)
{
SGOffset local_total = bulk_prefix[block];
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i = block * block_size; i < block_end; i++)
{
prefix[i] = local_total;
local_total += degrees[i];
}
}
prefix[degrees.size()] = bulk_prefix[num_blocks];
return prefix;
}
// Removes self-loops and redundant edges
// Side effect: neighbor IDs will be sorted
void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
DestID_ ***sq_index, DestID_ **sq_neighs)
{
pvector<NodeID_> diffs(g.num_nodes());
DestID_ *n_start, *n_end;
#pragma omp parallel for private(n_start, n_end)
for (NodeID_ n = 0; n < g.num_nodes(); n++)
{
if (transpose)
{
n_start = g.in_neigh(n).begin();
n_end = g.in_neigh(n).end();
}
else
{
n_start = g.out_neigh(n).begin();
n_end = g.out_neigh(n).end();
}
std::sort(n_start, n_end);
DestID_ *new_end = std::unique(n_start, n_end);
new_end = std::remove(n_start, new_end, n);
diffs[n] = new_end - n_start;
}
pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
*sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
*sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
#pragma omp parallel for private(n_start)
for (NodeID_ n = 0; n < g.num_nodes(); n++)
{
if (transpose)
n_start = g.in_neigh(n).begin();
else
n_start = g.out_neigh(n).begin();
std::copy(n_start, n_start + diffs[n], (*sq_index)[n]);
}
}
CSRGraph<NodeID_, DestID_, invert> SquishGraph(
const CSRGraph<NodeID_, DestID_, invert> &g)
{
DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
SquishCSR(g, false, &out_index, &out_neighs);
if (g.directed())
{
if (invert)
SquishCSR(g, true, &in_index, &in_neighs);
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
out_neighs, in_index,
in_neighs);
}
else
{
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
out_neighs);
}
}
/*
Graph Building Steps (for CSR):
- Read edgelist once to determine vertex degrees (CountDegrees)
- Determine vertex offsets by a prefix sum (ParallelPrefixSum)
- Allocate storage and set pointers according to offsets (GenIndex)
- Copy edges into storage
*/
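// Worked example (illustrative): degrees {2, 0, 3} prefix-sum to offsets
// {0, 2, 2, 5}; GenIndex then points vertex n's neighbor list at
// neighs + offsets[n], and the copy loop below advances offsets[n] once per
// inserted edge.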
void MakeCSR(const EdgeList &el, bool transpose, DestID_ ***index,
DestID_ **neighs)
{
pvector<NodeID_> degrees = CountDegrees(el, transpose);
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
*neighs = new DestID_[offsets[num_nodes_]];
*index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++)
{
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
(*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
if (symmetrize_ || (!symmetrize_ && transpose))
(*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
GetSource(e);
}
}
CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el)
{
DestID_ **index = nullptr, **inv_index = nullptr;
DestID_ *neighs = nullptr, *inv_neighs = nullptr;
Timer t;
t.Start();
if (num_nodes_ == -1)
num_nodes_ = FindMaxNodeID(el) + 1;
if (needs_weights_)
Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
MakeCSR(el, false, &index, &neighs);
if (!symmetrize_ && invert)
MakeCSR(el, true, &inv_index, &inv_neighs);
t.Stop();
// PrintTime("Build Time", t.Seconds());
if (symmetrize_)
return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
else
return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
inv_index, inv_neighs);
}
CSRGraph<NodeID_, DestID_, invert> MakeGraph()
{
CSRGraph<NodeID_, DestID_, invert> g;
{ // extra scope to trigger earlier deletion of el (save memory)
EdgeList el;
if (cli_.filename() != "")
{
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg"))
{
return r.ReadSerializedGraph();
}
else
{
el = r.ReadFile(needs_weights_);
}
}
else if (cli_.scale() != -1)
{
Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
el = gen.GenerateEL(cli_.uniform());
}
g = MakeGraphFromEL(el);
}
return SquishGraph(g);
}
// Relabels (and rebuilds) graph by order of decreasing degree
static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
const CSRGraph<NodeID_, DestID_, invert> &g)
{
if (g.directed())
{
std::cout << "Cannot relabel directed graph" << std::endl;
std::exit(-11);
}
Timer t;
t.Start();
typedef std::pair<int64_t, NodeID_> degree_node_p;
pvector<degree_node_p> degree_id_pairs(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n = 0; n < g.num_nodes(); n++)
degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
std::greater<degree_node_p>());
pvector<NodeID_> degrees(g.num_nodes());
pvector<NodeID_> new_ids(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n = 0; n < g.num_nodes(); n++)
{
degrees[n] = degree_id_pairs[n].first;
new_ids[degree_id_pairs[n].second] = n;
}
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_ *neighs = new DestID_[offsets[g.num_nodes()]];
DestID_ **index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for
for (NodeID_ u = 0; u < g.num_nodes(); u++)
{
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
std::sort(index[new_ids[u]], index[new_ids[u] + 1]);
}
t.Stop();
PrintTime("Relabel", t.Seconds());
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
// Make CSRGraph from an edgelist (Henry T.)
static CSRGraph<NodeID_, DestID_, invert> Load_CSR_From_Edgelist(EdgeList &el, bool symmetrize)
{
CLBase cli(0, NULL);
BuilderBase<NodeID_, DestID_, WeightT_> bb(cli);
bb.needs_weights_ = false;
bb.symmetrize_ = symmetrize;
// auto tmp = bb.MakeGraphFromEL(el);
// return bb.SquishGraph(tmp);
return bb.MakeGraphFromEL(el);
}
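// Usage sketch (illustrative; assumes NodeID_ = DestID_ = int32_t and the
// EdgePair/pvector types from graph.h / pvector.h):
//
//   pvector<EdgePair<int32_t, int32_t>> el;
//   el.push_back(EdgePair<int32_t, int32_t>(0, 1));
//   el.push_back(EdgePair<int32_t, int32_t>(1, 2));
//   auto g = BuilderBase<int32_t>::Load_CSR_From_Edgelist(el, true /* symmetrize */);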
// Make CSRGraph from an edgelist, then squish it (Henry T.)
static CSRGraph<NodeID_, DestID_, invert> Load_CSR_From_Edgelist_Squished(EdgeList &el, bool symmetrize)
{
CLBase cli(0, NULL);
BuilderBase<NodeID_, DestID_, WeightT_> bb(cli);
bb.needs_weights_ = false;
bb.symmetrize_ = symmetrize;
auto tmp = bb.MakeGraphFromEL(el);
return bb.SquishGraph(tmp);
// return bb.MakeGraphFromEL(el);
}
};
#endif // BUILDER_H_
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel data. Linking with zlib is
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// not zero for only a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agree with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
// must be unique and non-empty (according to spec.);
// use EXRSetNameAttr for setting value;
// max 255 characters allowed - excluding terminating zero
char name[256];
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`.
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
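// Usage sketch for LoadEXR() (illustrative; "input.exr" is an assumed filename):
//
//   float *rgba = NULL; int width = 0, height = 0; const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) { fprintf(stderr, "EXR load error: %s\n", err); FreeEXRErrorMessage(err); }
//   } else {
//     // rgba holds width * height * 4 floats in RGBA order.
//     free(rgba);
//   }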
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba`. Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err` when
// there's an error. When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by just looking up the header).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`.
// Save image as fp16(HALF) format when `save_as_fp16` is a positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
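// Usage sketch for SaveEXR() (illustrative; `rgba`, `width`, `height` are assumed
// caller-provided):
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4 /* RGBA */, 1 /* save as fp16 */,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "EXR save error: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }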
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
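// Usage sketch for the low-level path (illustrative; "input.exr" is an assumed
// filename):
//
//   EXRVersion version; EXRHeader header; EXRImage image; const char *err = NULL;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//   if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS &&
//       ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) == TINYEXR_SUCCESS &&
//       LoadEXRImageFromFile(&image, &header, "input.exr", &err) == TINYEXR_SUCCESS) {
//     // image.images[c] holds each channel's pixels (header.num_channels channels).
//     FreeEXRImage(&image);
//   }
//   if (err) FreeEXRErrorMessage(err);
//   FreeEXRHeader(&header);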
// Loads single-part OpenEXR image from memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `EXRHeader*` array with `ParseEXRMultipartHeaderFromFile`
// before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes on success.
// Returns zero and may set an error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a file.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns the number of bytes on success.
// Returns zero and may set an error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
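// Usage sketch (illustrative; `src`, `src_len`, `dst`, `dst_cap` are assumed
// caller-provided buffers): single-shot streaming compression of an in-memory
// buffer with the deflate API above.
//
//   mz_stream s; memset(&s, 0, sizeof(s));
//   if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) == MZ_OK) {
//     s.next_in = src;  s.avail_in = (unsigned int)src_len;
//     s.next_out = dst; s.avail_out = (unsigned int)dst_cap;
//     if (mz_deflate(&s, MZ_FINISH) == MZ_STREAM_END) {
//       // s.total_out holds the compressed size.
//     }
//     mz_deflateEnd(&s);
//   }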
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
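// Usage sketch (illustrative; `src` and `src_len` are assumed caller-provided):
// single-call compression with mz_compressBound() + mz_compress().
//
//   mz_ulong cmp_len = mz_compressBound((mz_ulong)src_len);
//   unsigned char *cmp = (unsigned char *)malloc(cmp_len);
//   if (cmp && mz_compress(cmp, &cmp_len, src, (mz_ulong)src_len) == MZ_OK) {
//     // cmp_len now holds the actual compressed size.
//   }
//   free(cmp);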
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
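// Example (illustrative sketch, not part of the miniz API): enumerate an
// archive's central directory and pull each regular file entry into a heap
// block. "archive.zip" is a placeholder path and error handling is minimal.
#if 0
void zip_reader_example(void) {
  mz_zip_archive zip;
  mz_uint i, n;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
  n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    size_t uncomp_size;
    void *p;
    if (!mz_zip_reader_file_stat(&zip, i, &st)) break;
    if (mz_zip_reader_is_file_a_directory(&zip, i)) continue;
    p = mz_zip_reader_extract_to_heap(&zip, i, &uncomp_size, 0);
    if (p) {
      // st.m_filename / st.m_uncomp_size describe the entry; p holds its data.
      mz_free(p);
    }
  }
  mz_zip_reader_end(&zip);
}
#endif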
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
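// Example (illustrative sketch, not part of the miniz API): create a new
// archive on disk, add a single entry from memory, then finalize and end.
// "out.zip" and the entry contents are placeholders.
#if 0
mz_bool zip_writer_example(void) {
  const char data[] = "hello, zip";
  mz_zip_archive zip;
  mz_bool ok;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_file(&zip, "out.zip", 0)) return MZ_FALSE;
  ok = mz_zip_writer_add_mem(&zip, "greeting.txt", data, sizeof(data) - 1,
                             MZ_DEFAULT_LEVEL);
  // Finalizing is mandatory; without it the archive is not valid.
  if (ok) ok = mz_zip_writer_finalize_archive(&zip);
  // Always end the writer, even on failure, to release its allocations.
  ok &= mz_zip_writer_end(&zip);
  return ok;
}
#endif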
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
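// Example (illustrative sketch, not part of the miniz API): the one-call
// helpers above: append a memory blob to a .zip on disk, then read the same
// entry back into a heap block. File names are placeholders.
#if 0
mz_bool zip_helpers_example(void) {
  const char msg[] = "appended via helper";
  size_t size = 0;
  void *p;
  if (!mz_zip_add_mem_to_archive_file_in_place("out.zip", "notes/msg.txt", msg,
                                               sizeof(msg) - 1, NULL, 0,
                                               MZ_DEFAULT_LEVEL))
    return MZ_FALSE;
  p = mz_zip_extract_archive_file_to_heap("out.zip", "notes/msg.txt", &size, 0);
  if (!p) return MZ_FALSE;
  mz_free(p);
  return MZ_TRUE;
}
#endif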
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
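// Example (illustrative sketch, not part of the miniz API): inflate a
// zlib-wrapped memory block straight to a heap buffer. pComp/comp_len are
// assumed to hold valid zlib data.
#if 0
void tinfl_heap_example(const void *pComp, size_t comp_len) {
  size_t out_len = 0;
  void *p = tinfl_decompress_mem_to_heap(pComp, comp_len, &out_len,
                                         TINFL_FLAG_PARSE_ZLIB_HEADER);
  if (p) {
    // out_len bytes of decompressed data are now at p.
    mz_free(p);
  }
}
#endif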
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
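// Example (illustrative sketch, not part of the miniz API): driving
// tinfl_decompress() directly with a 32KB ring dictionary, which is roughly
// what the higher-level helpers do internally. refill_input()/flush_output()
// are hypothetical application callbacks, and treating "the last refill
// returned data" as "more input may follow" is a simplifying assumption.
#if 0
extern size_t refill_input(void *pBuf, size_t max_len); // returns 0 at EOF
extern void flush_output(const void *pBuf, size_t len);
int tinfl_raw_example(void) {
  tinfl_decompressor inflator;
  static mz_uint8 dict[TINFL_LZ_DICT_SIZE];
  static mz_uint8 in_buf[4096];
  size_t dict_ofs = 0, in_ofs = 0, in_avail = 0;
  tinfl_init(&inflator);
  for (;;) {
    size_t in_size, out_size;
    tinfl_status status;
    if (in_ofs == in_avail) {
      in_avail = refill_input(in_buf, sizeof(in_buf));
      in_ofs = 0;
    }
    in_size = in_avail - in_ofs;
    out_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    status = tinfl_decompress(&inflator, in_buf + in_ofs, &in_size, dict,
                              dict + dict_ofs, &out_size,
                              TINFL_FLAG_PARSE_ZLIB_HEADER |
                                  (in_avail ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    in_ofs += in_size;
    // Consume the bytes just written into the ring dictionary, then advance
    // the ring offset (the dictionary wraps at TINFL_LZ_DICT_SIZE).
    flush_output(dict + dict_ofs, out_size);
    dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
    if (status == TINFL_STATUS_DONE) return 0;
    if (status < 0) return -1; // TINFL_STATUS_FAILED, bad param, etc.
  }
}
#endif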
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
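// Example (illustrative sketch, not part of the miniz API): compress a memory
// block to a heap buffer with a zlib header and adler-32, OR'ing the default
// probe count into the flags as described above, then release the block.
#if 0
void tdefl_heap_example(const void *pSrc, size_t src_len) {
  size_t comp_len = 0;
  void *pComp = tdefl_compress_mem_to_heap(
      pSrc, src_len, &comp_len,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (pComp) {
    // comp_len bytes of zlib-wrapped deflate data are now at pComp.
    free(pComp); // The block was allocated via malloc(), per the note above.
  }
}
#endif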
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL for a reasonable default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
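// Example (illustrative sketch, not part of the miniz API): encode a tiny
// 2x2 RGB image (placeholder pixel data) to an in-memory PNG file. The pitch
// is w*num_chans bytes per scanline, as described above.
#if 0
void png_example(void) {
  const mz_uint8 pixels[2 * 2 * 3] = {
      255, 0,   0,   0,   255, 0,    // top scanline: red, green
      0,   0,   255, 255, 255, 255}; // bottom scanline: blue, white
  size_t png_size = 0;
  void *pPng =
      tdefl_write_image_to_png_file_in_memory(pixels, 2, 2, 3, &png_size);
  if (pPng) {
    // png_size bytes at pPng form a complete PNG file; write it out as needed.
    mz_free(pPng);
  }
}
#endif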
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with up to TDEFL_OUT_BUF_SIZE
// bytes at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
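// Example (illustrative sketch, not part of the miniz API): the callback-driven
// workflow described above. tdefl_init() is given a non-NULL put-buf function,
// so tdefl_compress_buffer() is used and consumes the whole input each call.
// Writing to stdout and the use of <stdio.h>/<stdlib.h> are assumptions.
#if 0
static mz_bool my_write_cb(const void *pBuf, int len, void *pUser) {
  (void)pUser;
  return fwrite(pBuf, 1, (size_t)len, stdout) == (size_t)len;
}
mz_bool tdefl_callback_example(const void *pSrc, size_t src_len) {
  // tdefl_compressor is several hundred KB; allocate it on the heap, not the stack.
  tdefl_compressor *pComp = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
  mz_bool ok = MZ_FALSE;
  if (!pComp) return MZ_FALSE;
  if (tdefl_init(pComp, my_write_cb, NULL,
                 TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) ==
      TDEFL_STATUS_OKAY) {
    ok = (tdefl_compress_buffer(pComp, pSrc, src_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  }
  free(pComp);
  return ok;
}
#endif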
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of the zlib-style macros defined above.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really overly conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a Huffman code by using whatever bits are currently present in the
// bit buffer. If this fails, it reads another byte, and tries again until it
// succeeds or until the bit buffer contains >=15 bits (deflate's max. Huffman
// code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read beyond the final byte of the deflate stream. (In other words,
// when this macro wants to read another byte from the input, it REALLY needs
// another byte in order to fully decode the next Huffman code.) Handling this
// properly is particularly important on raw deflate (non-zlib) streams, which
// aren't followed by a byte aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
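// Illustrative usage sketch (not part of miniz): one-shot decompression of a
// zlib stream held entirely in memory. The returned buffer is heap-allocated
// and must be released with MZ_FREE(). "compressed" and "compressed_len" are
// hypothetical caller-provided values.
//
//   size_t out_len = 0;
//   void *p = tinfl_decompress_mem_to_heap(compressed, compressed_len, &out_len,
//                                          TINFL_FLAG_PARSE_ZLIB_HEADER);
//   if (!p) { /* corrupt stream or out of memory */ }
//   else { /* out_len bytes of decompressed data at p */ MZ_FREE(p); }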
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
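// Illustrative usage sketch (not part of miniz): decompress into a
// caller-provided buffer that is already known to be large enough.
// "dst", "dst_cap", "src" and "src_len" are hypothetical.
//
//   size_t n = tinfl_decompress_mem_to_mem(dst, dst_cap, src, src_len,
//                                          TINFL_FLAG_PARSE_ZLIB_HEADER);
//   if (n == TINFL_DECOMPRESS_MEM_TO_MEM_FAILED) { /* failed */ }
//   else { /* n bytes were written to dst */ }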
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
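// Illustrative usage sketch (not part of miniz): stream decompressed output
// through a callback, e.g. straight to a FILE*. The callback returns non-zero
// to keep decompression going. "my_write_cb", "fp", "src" and "src_len" are
// hypothetical, and <stdio.h> is assumed to be available.
//
//   static int my_write_cb(const void *pBuf, int len, void *pUser) {
//     return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
//   }
//   ...
//   size_t in_size = src_len;
//   int ok = tinfl_decompress_mem_to_callback(src, &in_size, my_write_cb, fp,
//                                             TINFL_FLAG_PARSE_ZLIB_HEADER);
//   /* ok != 0 on success; in_size holds the number of input bytes consumed */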
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
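// Illustrative usage sketch (not part of miniz): one-shot compression with the
// low-level streaming API, using caller-provided input/output buffers instead
// of a put-buf callback. "src"/"src_len" and "dst"/"dst_cap" are hypothetical;
// dst must be large enough for the whole compressed stream for a single
// TDEFL_FINISH call to return TDEFL_STATUS_DONE. The compressor state is large,
// so it is heap-allocated here, mirroring tdefl_compress_mem_to_output().
//
//   tdefl_compressor *pComp =
//       (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
//   size_t in_size = src_len, out_size = dst_cap;
//   if (pComp &&
//       (tdefl_init(pComp, NULL, NULL,
//                   TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) ==
//        TDEFL_STATUS_OKAY) &&
//       (tdefl_compress(pComp, src, &in_size, dst, &out_size, TDEFL_FINISH) ==
//        TDEFL_STATUS_DONE)) {
//     /* out_size compressed bytes were written to dst */
//   }
//   MZ_FREE(pComp);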
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
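// Illustrative usage sketch (not part of miniz): compress a memory block and
// hand the output to a callback as it is produced (useful for writing straight
// to a file or socket). "my_put_cb", "fp", "src" and "src_len" are
// hypothetical, and <stdio.h> is assumed to be available.
//
//   static mz_bool my_put_cb(const void *pBuf, int len, void *pUser) {
//     return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
//   }
//   ...
//   mz_bool ok = tdefl_compress_mem_to_output(
//       src, src_len, my_put_cb, fp,
//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);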
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
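// Illustrative usage sketch (not part of miniz): compress to a heap buffer and
// round-trip it through tinfl_decompress_mem_to_heap(). Both returned buffers
// are owned by the caller and freed with MZ_FREE(). "src" and "src_len" are
// hypothetical.
//
//   size_t comp_len = 0, decomp_len = 0;
//   void *pComp = tdefl_compress_mem_to_heap(
//       src, src_len, &comp_len,
//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   void *pDecomp = pComp ? tinfl_decompress_mem_to_heap(
//                               pComp, comp_len, &decomp_len,
//                               TINFL_FLAG_PARSE_ZLIB_HEADER)
//                         : NULL;
//   /* on success decomp_len == src_len and pDecomp matches src */
//   MZ_FREE(pDecomp);
//   MZ_FREE(pComp);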
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
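// Illustrative usage sketch (not part of miniz): translate zlib-style
// parameters (level 9, 15 window bits, default strategy) into tdefl_* flags
// and feed them to one of the high-level helpers. "src"/"src_len" and
// "dst"/"dst_cap" are hypothetical; MZ_DEFAULT_STRATEGY is assumed to be the
// zlib-compatible strategy constant from the miniz header.
//
//   mz_uint flags =
//       tdefl_create_comp_flags_from_zip_params(9, 15, MZ_DEFAULT_STRATEGY);
//   size_t n = tdefl_compress_mem_to_mem(dst, dst_cap, src, src_len, (int)flags);
//   if (!n) { /* failed: dst too small or bad input */ }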
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
  // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
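// Illustrative usage sketch (not part of miniz): encode a raw RGBA image held
// in memory as a PNG file and save it with stdio. "pixels", "width", "height"
// and "out.png" are hypothetical; num_chans == 4 means RGBA, and <stdio.h> is
// assumed to be available.
//
//   size_t png_len = 0;
//   void *png = tdefl_write_image_to_png_file_in_memory(pixels, width, height,
//                                                       4, &png_len);
//   if (png) {
//     FILE *f = fopen("out.png", "wb");
//     if (f) { fwrite(png, 1, png_len, f); fclose(f); }
//     MZ_FREE(png);
//   }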
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
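// For reference, the DOS date/time bit packing used above:
//   dos_time: bits 15-11 hour, bits 10-5 minute, bits 4-0 second/2
//   dos_date: bits 15-9 year-1980, bits 8-5 month (1-12), bits 4-0 day (1-31)
// e.g. 1998-02-03 12:30:36 packs to dos_time 0x63D2 and dos_date 0x2443.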
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject files which are too small, and check the first
// 4 bytes of the file to make sure a local header is there.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
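// Example usage (illustrative sketch, requires stdio support): open an archive
// from disk, work with it through the reader APIs, then release it. The
// mz_zip_archive struct must be zeroed before its first init call.
#if 0
static mz_bool example_open_archive(const char *pZip_filename) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, pZip_filename, 0)) return MZ_FALSE;
  // ... locate / stat / extract entries here ...
  mz_zip_reader_end(&zip);
  return MZ_TRUE;
}
#endif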
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
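// Example usage (illustrative sketch): enumerate every entry of an archive that
// has already been opened with one of the mz_zip_reader_init*() calls.
#if 0
static void example_list_entries(mz_zip_archive *pZip) {
  mz_uint i, n = mz_zip_reader_get_num_files(pZip);
  for (i = 0; i < n; ++i) {
    mz_zip_archive_file_stat st;
    if (mz_zip_reader_file_stat(pZip, i, &st))
      printf("%s (comp %u bytes, uncomp %u bytes)\n", st.m_filename,
             (mz_uint)st.m_comp_size, (mz_uint)st.m_uncomp_size);
  }
}
#endif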
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return MZ_FALSE;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
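// Example usage (illustrative sketch; "readme.txt" is just a placeholder entry
// name): extract one named entry into a heap block allocated through the
// archive's allocator, then free it the same way (mz_free() also works when the
// default allocators are in use).
#if 0
static void example_extract_entry(mz_zip_archive *pZip) {
  size_t size = 0;
  void *p = mz_zip_reader_extract_file_to_heap(pZip, "readme.txt", &size, 0);
  if (p) {
    // ... use the `size` uncompressed bytes at `p` ...
    pZip->m_pFree(pZip->m_pAlloc_opaque, p);
  }
}
#endif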
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // maximum size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
&central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
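// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of miniz, kept disabled): building a
// .zip image entirely in memory with the heap writer and the add_mem_ex()
// routine above. The entry name and payload are placeholders, the
// mz_zip_writer_init_heap() signature is assumed from miniz's standard API,
// and free() on the returned buffer assumes the default malloc/free allocator.
#if 0
static void example_write_zip_to_heap(void) {
  mz_zip_archive zip;
  void *pBuf = NULL;
  size_t size = 0;
  MZ_CLEAR_OBJ(zip);
  if (!mz_zip_writer_init_heap(&zip, 0, 0)) return;
  const char payload[] = "hello";
  // Passing uncomp_size/uncomp_crc32 as 0 (and no MZ_ZIP_FLAG_COMPRESSED_DATA)
  // lets mz_zip_writer_add_mem_ex() compute the CRC32 and sizes itself.
  if (mz_zip_writer_add_mem_ex(&zip, "hello.txt", payload, sizeof(payload) - 1,
                               NULL, 0, MZ_DEFAULT_LEVEL, 0, 0) &&
      mz_zip_writer_finalize_heap_archive(&zip, &pBuf, &size)) {
    // pBuf/size now hold a complete zip archive image.
  }
  mz_zip_writer_end(&zip);
  free(pBuf);  // free(NULL) is a no-op; valid with the default allocator
}
#endif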
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
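// Illustrative usage sketch (not part of miniz, kept disabled): the two STDIO
// helpers above give a one-call round trip. The file and entry names are
// placeholders, and releasing the extracted buffer with free() assumes the
// default malloc/free allocator.
#if 0
static void example_in_place_round_trip(void) {
  const char msg[] = "some text";
  size_t size = 0;
  void *p;
  // Creates test.zip if needed, otherwise appends the entry to it.
  if (!mz_zip_add_mem_to_archive_file_in_place("test.zip", "note.txt", msg,
                                               sizeof(msg) - 1, NULL, 0,
                                               MZ_DEFAULT_LEVEL))
    return;
  p = mz_zip_extract_archive_file_to_heap("test.zip", "note.txt", &size, 0);
  if (p) {
    // p points to the uncompressed entry contents (size bytes).
    free(p);
  }
}
#endif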
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(float *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
float tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
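// Worked example (illustrative): the float 1.0f has sign 0, exponent 127 and
// mantissa 0, so float_to_half_full() rebiases the exponent to
// 127 - 127 + 15 = 15, keeps a zero mantissa, and yields the half bit pattern
// 0x3C00; half_to_float() applied to 0x3C00 restores the bias (15 -> 127) and
// returns exactly 1.0f.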
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
// Read until NULL (\0).
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
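// For reference, the attribute layout parsed by ReadAttribute() is, in order:
//   name  : NUL-terminated string
//   type  : NUL-terminated string (e.g. "box2i", "compression", "string")
//   size  : little-endian uint32, length of the value in bytes
//   value : `size` raw bytes
// For example, a "compression" attribute carrying the single byte 3
// (IMF_ZIP_COMPRESSION) has name "compression", type "compression", size 1.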
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(&outLen);
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
int min_x;
int min_y;
int max_x;
int max_y;
} Box2iInfo;
struct HeaderInfo {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
Box2iInfo data_window;
int line_order;
Box2iInfo display_window;
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tiled; // Non-zero if the part is tiled.
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
// required for multi-part or non-image files
std::string name;
// required for multi-part or non-image files
std::string type;
void clear() {
channels.clear();
attributes.clear();
data_window.min_x = 0;
data_window.min_y = 0;
data_window.max_x = 0;
data_window.max_y = 0;
line_order = 0;
display_window.min_x = 0;
display_window.min_y = 0;
display_window.max_x = 0;
display_window.max_y = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tiled = 0;
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
name.clear();
type.clear();
}
};
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(&info.pixel_type);
tinyexr::swap4(&info.x_sampling);
tinyexr::swap4(&info.y_sampling);
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(&pixel_type);
tinyexr::swap4(&x_sampling);
tinyexr::swap4(&y_sampling);
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
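// For reference, each channel record handled by Read/WriteChannelInfo() is:
//   name       : NUL-terminated string (less than 256 bytes)
//   pixel_type : int32 (UINT = 0, HALF = 1, FLOAT = 2)
//   pLinear    : uint8, followed by 3 reserved bytes
//   xSampling  : int32
//   ySampling  : int32
// and the whole channel list ends with one extra NUL byte, which is why
// WriteChannelInfo() resizes the buffer to sz + 1.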
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply the EXR-specific pre-compression transform (adapted from OpenEXR's
// ImfZipCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
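// Worked example of the transform above (illustrative): the 4 source bytes
// {10, 20, 30, 40} are split into interleaved halves {10, 30, 20, 40}, and
// the delta predictor then stores (cur - prev + 128 + 256) mod 256 for each
// byte, giving {10, 148, 118, 148}. DecompressZip() below inverts this with
// t[0] = t[-1] + t[0] - 128 followed by re-interleaving.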
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply the EXR-specific post-decompression transform (adapted from OpenEXR's
// ImfZipCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
// Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
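// Worked example of the RLE format (illustrative): the 6 input bytes
// {5, 5, 5, 5, 1, 2} compress to the 5 bytes {3, 5, -2, 1, 2}; a run of at
// least MIN_RUN_LENGTH identical bytes is stored as (count - 1, value), and a
// literal stretch is stored as its negated length followed by the raw bytes.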
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply the EXR-specific pre-compression transform (adapted from OpenEXR's
// ImfRleCompressor.cpp).
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be at most (src_size * 3) / 2.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply the EXR-specific post-decompression transform (adapted from OpenEXR's
// ImfRleCompressor.cpp).
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
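// Worked example (illustrative): wenc14(10, 4, l, h) stores the rounded
// average l = (10 + 4) >> 1 = 7 and the difference h = 10 - 4 = 6;
// wdec14(7, 6, a, b) reconstructs a = 7 + (6 & 1) + (6 >> 1) = 10 and
// b = 10 - 6 = 4, so the pair round-trips exactly.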
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Includes some modifications for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
unsigned int len : 8; // code length 0
unsigned int lit : 24; // lit p size
unsigned int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
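// Worked example (illustrative): for three symbols with code lengths
// {2, 2, 1}, the counting passes leave n[2] = 0 and n[1] = 1, so the loop
// above assigns codes 00 and 01 to the two length-2 symbols and code 1 to the
// length-1 symbol; the short code padded with zeroes to the right (10) is
// numerically larger than both longer codes, as the canonical rules require.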
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
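// Worked example (illustrative): with SHORTEST_LONG_RUN = 6, a run of 10
// zero-length codes packs as the 6-bit value 63 followed by the 8-bit value
// 10 - 6 = 4, while a run of 3 zeroes packs as the single 6-bit code 60.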
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
unsigned int *p = pl->p;
pl->p = new unsigned int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new unsigned int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
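// Illustrative example (HUF_DECBITS shown hypothetically as 4 to keep the
// numbers small): a 3-bit short code 0b101 fills the two primary entries
// 0b1010 and 0b1011, so every HUF_DECBITS-bit lookup window whose top bits
// are 101 resolves to that symbol with a single table access.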
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
  // Output a run of runCount instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
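// Illustrative numbers for the choice above: with hufLength(sCode) == 3,
// hufLength(runCode) == 6 and runCount == 10, the run form costs
// 3 + 6 + 8 = 17 bits versus 3 * 10 = 30 bits for repeating the symbol,
// so the run form is emitted.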
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
const int ni, // i : input buffer size (in bytes)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
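// Illustrative run-length decode example: if the previously emitted output
// value was 42 and the decoder now sees the run-length code followed by the
// 8-bit count 3, getCode() appends three more copies of 42 to the output.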
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in bytes)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
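// Note: writeUInt()/readUInt() store and load 32-bit values in little-endian
// byte order regardless of the host CPU, matching the byte order used by the
// EXR file format.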
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
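//
// Layout of the buffer produced by hufCompress() (derived from the code
// above; byte offsets):
//
//   [ 0.. 3]  im           minimum non-zero symbol index
//   [ 4.. 7]  iM           maximum non-zero symbol index
//   [ 8..11]  tableLength  size of the packed encoding table, in bytes
//   [12..15]  nBits        size of the Huffman-coded data, in bits
//   [16..19]  reserved (written as 0)
//   [20..  ]  packed encoding table, then (nBits + 7) / 8 bytes of data
//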
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
if (nCompressed == 0) {
if (raw->size() != 0) return false;
return false;
}
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be runnable on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
&freq.at(0));
{
if (nBits > 8 * (nCompressed - (ptr - compressed))) {
return false;
}
hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
raw->data());
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
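// Usage sketch for the two entry points above (illustrative only; excluded
// from the build). The output vector passed to hufUncompress() must already
// be sized to the expected number of decoded values, and the compressed
// buffer here is simply over-allocated for this tiny input.
#if 0
static void hufRoundTripExample() {
  const unsigned short raw[4] = {1, 1, 1, 7};
  std::vector<char> compressed(1024);  // generous upper bound for 4 values
  int nCompressed = hufCompress(raw, 4, compressed.data());
  std::vector<unsigned short> decoded(4);  // caller sizes the output
  if (hufUncompress(compressed.data(), nCompressed, &decoded)) {
    // On success, decoded holds {1, 1, 1, 7} again.
  }
}
#endif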
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
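// Illustrative example: if the pixel data contains only the values {0, 5, 9},
// forwardLutFromBitmap() yields lut[0] = 0, lut[5] = 1, lut[9] = 2 (all other
// entries 0) and returns maxValue = 2, while reverseLutFromBitmap() yields the
// inverse mapping lut[0] = 0, lut[1] = 5, lut[2] = 9.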
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
  // Assume `inSize` is a multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
  // Length header (4 bytes), then Huffman data. Initialize the length header
  // with zero, then fill it in later with `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
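//
// Sketch of the PIZ-compressed block produced by CompressPiz() above
// (derived from the code; field sizes in bytes):
//
//   minNonZero (2) | maxNonZero (2) |
//   bitmap[minNonZero..maxNonZero] (present only if minNonZero <= maxNonZero) |
//   length (4) | Huffman-coded, wavelet-encoded, LUT-remapped samples
//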
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
std::vector<unsigned short> tmpBuffer(tmpBufSize);
hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
unsigned int precision;
unsigned int __pad0;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
unsigned int __pad1;
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0;
}
};
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes, std::string *err) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
if (attributes[i].size == 1) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
break;
} else {
if (err) {
(*err) +=
"zfpCompressionType attribute must be uchar(1 byte) type.\n";
}
return false;
}
}
}
if (!foundType) {
if (err) {
(*err) += "`zfpCompressionType` attribute not found.\n";
}
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionRate` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision = static_cast<unsigned int>(
            *(reinterpret_cast<int *>(attributes[i].value)));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionPrecision` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionTolerance` attribute not found.\n";
}
} else {
if (err) {
(*err) += "Unknown value specified for `zfpCompressionType`.\n";
}
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
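// Note on line order: line_order == 0 corresponds to INCREASING_Y (scanlines
// stored top-down); the copy loops below treat any other value as
// DECREASING_Y and write rows bottom-up.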
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
            // use `cpy2` to avoid unaligned memory access when the compiler's
            // optimizations are enabled.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
std::string e;
if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
int(num_attributes), &e)) {
// This code path should not be reachable.
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
            // Address may not be aligned. Use byte-wise copy for safety (issue #76).
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
              // Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static bool DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
// Here, data_width and data_height are the dimensions of the current (sub)level.
if (tile_size_x * tile_offset_x > data_width ||
tile_size_y * tile_offset_y > data_height) {
return false;
}
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
      // Unknown pixel type.
return false;
}
}
return true;
}
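// Illustrative example: for three channels stored as HALF (e.g. B, G, R),
// ComputeChannelLayout() yields channel_offset_list = {0, 2, 4} and
// pixel_data_size = 6; a FLOAT or UINT channel contributes 4 bytes instead
// of 2 to both the running offset and the per-pixel size.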
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
#ifdef _WIN32
static inline std::wstring UTF8ToWchar(const std::string &str) {
int wstr_size =
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
std::wstring wstr(wstr_size, 0);
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
(int)wstr.size());
return wstr;
}
#endif
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
bool has_name = false;
bool has_type = false;
info->name.clear();
info->type.clear();
info->data_window.min_x = 0;
info->data_window.min_y = 0;
info->data_window.max_x = 0;
info->data_window.max_y = 0;
info->line_order = 0; // @fixme
info->display_window.min_x = 0;
info->display_window.min_y = 0;
info->display_window.max_x = 0;
info->display_window.max_y = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tiled = 0;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
    // For a multi-part file, the 9th bit (the single-part tiled flag) of the
    // version field is 0.
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
if (err) {
(*err) = "Tile sizes were invalid.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
info->tiled = 1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->data_window.min_x);
tinyexr::swap4(&info->data_window.min_y);
tinyexr::swap4(&info->data_window.max_x);
tinyexr::swap4(&info->data_window.max_y);
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->display_window.min_x);
tinyexr::swap4(&info->display_window.min_y);
tinyexr::swap4(&info->display_window.max_x);
tinyexr::swap4(&info->display_window.max_y);
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(&info->pixel_aspect_ratio);
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(&info->screen_window_center[0]);
tinyexr::swap4(&info->screen_window_center[1]);
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(&info->screen_window_width);
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(&info->chunk_count);
}
} else if (attr_name.compare("name") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->name.resize(len);
info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
has_name = true;
}
} else if (attr_name.compare("type") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->type.resize(len);
info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
has_type = true;
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (version->multipart || version->non_image) {
if (!has_name) {
ss_err << "\"name\" attribute not found in the header."
<< std::endl;
}
if (!has_type) {
ss_err << "\"type\" attribute not found in the header."
<< std::endl;
}
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window.min_x = info.display_window.min_x;
exr_header->display_window.min_y = info.display_window.min_y;
exr_header->display_window.max_x = info.display_window.max_x;
exr_header->display_window.max_y = info.display_window.max_y;
exr_header->data_window.min_x = info.data_window.min_x;
exr_header->data_window.min_y = info.data_window.min_y;
exr_header->data_window.max_x = info.data_window.max_x;
exr_header->data_window.max_y = info.data_window.max_y;
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tiled = info.tiled;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
EXRSetNameAttr(exr_header, info.name.c_str());
if (!info.type.empty()) {
if (info.type == "scanlineimage") {
assert(!exr_header->tiled);
} else if (info.type == "tiledimage") {
assert(exr_header->tiled);
} else if (info.type == "deeptile") {
exr_header->non_image = 1;
assert(exr_header->tiled);
} else if (info.type == "deepscanline") {
exr_header->non_image = 1;
assert(!exr_header->tiled);
} else {
assert(false);
}
}
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy pointer
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
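// Offset table for chunk data.
// offsets[level_index][tile_y][tile_x] holds the absolute byte offset of each
// tile chunk; for scanline images a single table offsets[0][0][block] is used.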
struct OffsetData {
OffsetData() : num_x_levels(0), num_y_levels(0) {}
std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
int num_x_levels;
int num_y_levels;
};
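// Maps (lx, ly) level coordinates to a flat index into OffsetData::offsets:
// ONE_LEVEL has a single entry, MIPMAP levels are indexed by lx (lx == ly),
// and RIPMAP levels are laid out row-major as lx + ly * num_x_levels.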
int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
switch (tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
return 0;
case TINYEXR_TILE_MIPMAP_LEVELS:
return lx;
case TINYEXR_TILE_RIPMAP_LEVELS:
return lx + ly * num_x_levels;
default:
assert(false);
}
return 0;
}
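// Size of one mip/rip level: the top-level size divided by 2^level, rounded
// up or down according to the tile rounding mode and clamped to at least 1.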
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
assert(level >= 0);
int b = (int)(1u << (unsigned)level);
int level_size = toplevel_size / b;
if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size)
level_size += 1;
return std::max(level_size, 1);
}
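// Decodes all tiles of a single resolution level into exr_image->tiles.
// Uses std::thread or OpenMP when available; per-tile errors are accumulated
// in a flag and reported once after all tiles have been processed.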
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
const OffsetData& offset_data,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const unsigned char* head, const size_t size,
std::string* err) {
int num_channels = exr_header->num_channels;
int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
int num_tiles = num_x_tiles * num_y_tiles;
int err_code = TINYEXR_SUCCESS;
enum {
EF_SUCCESS = 0,
EF_INVALID_DATA = 1,
EF_INSUFFICIENT_DATA = 2,
EF_FAILED_TO_DECODE = 4
};
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
unsigned error_flag(EF_SUCCESS);
#endif
// Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
// the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
exr_image->level_x == 0 && exr_image->level_y == 0) {
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
#endif
exr_image->tiles = static_cast<EXRTile*>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]()
{
int tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
int x_tile = tile_idx % num_x_tiles;
int y_tile = tile_idx / num_x_tiles;
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
if (offset + sizeof(int) * 5 > size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
size_t data_size =
size_t(size - (offset + sizeof(int) * 5));
const unsigned char* data_ptr =
reinterpret_cast<const unsigned char*>(head + offset);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(&tile_coordinates[0]);
tinyexr::swap4(&tile_coordinates[1]);
tinyexr::swap4(&tile_coordinates[2]);
tinyexr::swap4(&tile_coordinates[3]);
if (tile_coordinates[2] != exr_image->level_x) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
if (tile_coordinates[3] != exr_image->level_y) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(&data_len);
if (data_len < 2 || size_t(data_len) > data_size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order,
exr_image->width, exr_image->height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// Failed to decode tile data.
error_flag |= EF_FAILED_TO_DECODE;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto& t : workers) {
t.join();
}
#else
} // parallel for
#endif
// Set these fields even on error so that the memory reserved above can be
// freed later (e.g. by FreeEXRImage()).
exr_image->num_channels = num_channels;
exr_image->num_tiles = static_cast<int>(num_tiles);
if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
if (err) {
if (error_flag & EF_INSUFFICIENT_DATA) {
(*err) += "Insufficient data length.\n";
}
if (error_flag & EF_FAILED_TO_DECODE) {
(*err) += "Failed to decode tile data.\n";
}
}
return err_code;
}
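// Decodes the pixel data of one image part: dispatches to DecodeTiledLevel()
// per mip/rip level for tiled images, or decodes scanline blocks directly.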
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const OffsetData& offset_data,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param,
exr_header->custom_attributes,
int(exr_header->num_custom_attributes), err)) {
return TINYEXR_ERROR_INVALID_HEADER;
}
#endif
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_y < exr_header->data_window.min_y) {
if (err) {
(*err) += "Invalid data window.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too-large data_width and data_height (likely an invalid header).
{
if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tiled) {
if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
<< ", "
<< "tile height = " << exr_header->tile_size_y << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
EXRImage* level_image = NULL;
for (int level = 0; level < offset_data.num_x_levels; ++level) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
level_image->level_x = level;
level_image->level_y = level;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
} else {
EXRImage* level_image = NULL;
for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
level_image->level_x = level_x;
level_image->level_y = level_y;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
}
} else { // scanline format
// Don't allow too-large images (256GB * pixel_data_size or more). Workaround
// for issue #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(&line_no);
tinyexr::swap4(&data_len);
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large a value; assume the data is invalid.
// (2 << 20) = 2097152 is a heuristic bound.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window.max_y + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.min_y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window.min_y);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window.min_y;
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
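// Reconstructs the scanline offset table by walking the chunk headers
// (y + data size) sequentially; used when the stored table is missing or
// invalid.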
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(&y);
tinyexr::swap4(&data_len);
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
static int FloorLog2(unsigned x) {
//
// For x > 0, floorLog2(x) returns floor(log(x)/log(2)).
//
int y = 0;
while (x > 1) {
y += 1;
x >>= 1u;
}
return y;
}
static int CeilLog2(unsigned x) {
//
// For x > 0, ceilLog2(x) returns ceil(log(x)/log(2)).
//
int y = 0;
int r = 0;
while (x > 1) {
if (x & 1)
r = 1;
y += 1;
x >>= 1u;
}
return y + r;
}
static int RoundLog2(int x, int tile_rounding_mode) {
return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) ? FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x));
}
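// Number of levels in x (and, in CalculateNumYLevels below, in y) implied by
// the data window and the tile level/rounding mode.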
static int CalculateNumXLevels(const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num = 0;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
num = 1;
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
int h = max_y - min_y + 1;
num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
}
break;
default:
assert(false);
}
return num;
}
static int CalculateNumYLevels(const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num = 0;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
num = 1;
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
int h = max_y - min_y + 1;
num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
int h = max_y - min_y + 1;
num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
}
break;
default:
assert(false);
}
return num;
}
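// Number of tiles along one axis for each level: ceil(level_size / tile_size).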
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
std::vector<int>& num_y_tiles,
const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num_x_levels = CalculateNumXLevels(exr_header);
int num_y_levels = CalculateNumYLevels(exr_header);
num_x_tiles.resize(num_x_levels);
num_y_tiles.resize(num_y_levels);
CalculateNumTiles(num_x_tiles,
max_x - min_x + 1,
exr_header->tile_size_x,
exr_header->tile_rounding_mode);
CalculateNumTiles(num_y_tiles,
max_y - min_y + 1,
exr_header->tile_size_y,
exr_header->tile_rounding_mode);
}
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
offset_data.offsets.resize(1);
offset_data.offsets[0].resize(1);
offset_data.offsets[0][0].resize(num_blocks);
offset_data.num_x_levels = 1;
offset_data.num_y_levels = 1;
}
// Return sum of tile blocks.
static int InitTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const std::vector<int>& num_x_tiles,
const std::vector<int>& num_y_tiles) {
int num_tile_blocks = 0;
offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
case TINYEXR_TILE_MIPMAP_LEVELS:
assert(offset_data.num_x_levels == offset_data.num_y_levels);
offset_data.offsets.resize(offset_data.num_x_levels);
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
offset_data.offsets[l].resize(num_y_tiles[l]);
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[l]);
num_tile_blocks += num_x_tiles[l];
}
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
int l = ly * offset_data.num_x_levels + lx;
offset_data.offsets[l].resize(num_y_tiles[ly]);
for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
num_tile_blocks += num_x_tiles[lx];
}
}
}
break;
default:
assert(false);
}
return num_tile_blocks;
}
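// Returns true if any entry of the offset table is zero or negative (when
// interpreted as signed), which means the table must be reconstructed.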
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
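// Bounds-checks a tile coordinate (dx, dy) at level (lx, ly) against the
// offset table layout for the current tile level mode.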
static bool isValidTile(const EXRHeader* exr_header,
const OffsetData& offset_data,
int dx, int dy, int lx, int ly) {
if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
int num_x_levels = offset_data.num_x_levels;
int num_y_levels = offset_data.num_y_levels;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
if (lx == 0 &&
ly == 0 &&
offset_data.offsets.size() > 0 &&
offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
if (lx < num_x_levels &&
ly < num_y_levels &&
offset_data.offsets.size() > static_cast<size_t>(lx) &&
offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
if (lx < num_x_levels &&
ly < num_y_levels &&
(offset_data.offsets.size() > idx) &&
offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
}
break;
default:
return false;
}
return false;
}
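// Rebuilds the tile offset table by scanning the tile chunks themselves:
// reads each chunk's (tileX, tileY, levelX, levelY) header and skips its
// payload; handles multipart and deep-data chunk layouts.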
static void ReconstructTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const unsigned char* head, const unsigned char* marker, const size_t /*size*/,
bool isMultiPartFile,
bool isDeep) {
int numXLevels = offset_data.num_x_levels;
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 tileOffset = marker - head;
if (isMultiPartFile) {
//int partNumber;
marker += sizeof(int);
}
int tileX;
memcpy(&tileX, marker, sizeof(int));
tinyexr::swap4(&tileX);
marker += sizeof(int);
int tileY;
memcpy(&tileY, marker, sizeof(int));
tinyexr::swap4(&tileY);
marker += sizeof(int);
int levelX;
memcpy(&levelX, marker, sizeof(int));
tinyexr::swap4(&levelX);
marker += sizeof(int);
int levelY;
memcpy(&levelY, marker, sizeof(int));
tinyexr::swap4(&levelY);
marker += sizeof(int);
if (isDeep) {
tinyexr::tinyexr_int64 packed_offset_table_size;
memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
marker += sizeof(tinyexr::tinyexr_int64);
tinyexr::tinyexr_int64 packed_sample_size;
memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
marker += sizeof(tinyexr::tinyexr_int64);
// next Int64 is unpacked sample size - skip that too
marker += packed_offset_table_size + packed_sample_size + 8;
} else {
int dataSize;
memcpy(&dataSize, marker, sizeof(int));
tinyexr::swap4(&dataSize);
marker += sizeof(int);
marker += dataSize;
}
if (!isValidTile(exr_header, offset_data,
tileX, tileY, levelX, levelY))
return;
int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
}
}
}
}
// Reads the offset table; `marker` is advanced past the table on return.
static int ReadOffsets(OffsetData& offset_data,
const unsigned char* head,
const unsigned char*& marker,
const size_t size,
const char** err) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offset_data.offsets[l][dy][dx] = offset;
}
}
}
return TINYEXR_SUCCESS;
}
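// Top-level decode: validates arguments and the data window, builds the chunk
// offset table (reading it from the stream and reconstructing it if invalid),
// then decodes all pixel data via DecodeChunk().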
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_x - exr_header->data_window.min_x ==
std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
exr_header->data_window.max_y - exr_header->data_window.min_y ==
std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too-large data_width and data_height (likely an invalid header).
{
if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
if (exr_header->tiled) {
if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
OffsetData offset_data;
size_t num_blocks = 0;
// For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
// If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
if (exr_header->tiled) {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
if (exr_header->chunk_count > 0) {
if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
int ret = ReadOffsets(offset_data, head, marker, size, err);
if (ret != TINYEXR_SUCCESS) return ret;
if (IsAnyOffsetsAreInvalid(offset_data)) {
ReconstructTileOffsets(offset_data, exr_header,
head, marker, size,
exr_header->multipart, exr_header->non_image);
}
} else if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
InitSingleResolutionOffsets(offset_data, num_blocks);
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
InitSingleResolutionOffsets(offset_data, num_blocks);
}
if (!exr_header->tiled) {
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
static void GetLayers(const EXRHeader &exr_header,
std::vector<std::string> &layer_names) {
// Naive implementation:
// group channels by layer by walking all channel names, splitting each at the
// last period, and collecting the unique layer-name prefixes.
layer_names.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string full_name(exr_header.channels[c].name);
const size_t pos = full_name.find_last_of('.');
if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
full_name.erase(pos);
if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
layer_names.end())
layer_names.push_back(full_name);
}
}
}
struct LayerChannel {
explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
size_t index;
std::string name;
};
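// Collects the channels belonging to `layer_name` (or all channels when the
// name is empty), stripping the layer prefix from each channel name.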
static void ChannelsInLayer(const EXRHeader &exr_header,
const std::string layer_name,
std::vector<LayerChannel> &channels) {
channels.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(layer_name + '.');
if (pos == std::string::npos) continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
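// Lists the layer names found in an EXR file. The returned array and each
// string are heap-allocated (malloc/strdup), so the caller is responsible for
// freeing them.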
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
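// Example usage (a minimal sketch; "input.exr" is a placeholder, and it
// assumes FreeEXRErrorMessage(), declared earlier in this header, is used to
// release the error string):
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   if (LoadEXR(&rgba, &width, &height, "input.exr", &err) == TINYEXR_SUCCESS) {
//     // rgba holds width * height * 4 floats (RGBA; A = 1.0 when absent).
//     free(rgba);
//   } else if (err) {
//     fprintf(stderr, "EXR load error: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }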
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
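// Loads a single layer as interleaved RGBA floats. HALF channels are read as
// FLOAT, a lone channel is replicated into R, G, B and A, and a missing alpha
// channel is filled with 1.0.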
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code("
<< ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// TODO: Probably limit loading to layers (channels) selected by layer index
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(
exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
} else if (ch.name == "G") {
idxG = int(ch.index);
} else if (ch.name == "B") {
idxB = int(ch.index);
} else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii = exr_image.tiles[it].offset_x *
static_cast<int>(exr_header.tile_size_x) +
i;
const int jj = exr_image.tiles[it].offset_y *
static_cast<int>(exr_header.tile_size_y) +
j;
const int idx = ii + jj * static_cast<int>(exr_image.width);
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val =
reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
exr_header->multipart = version->multipart ? 1 : 0;
exr_header->non_image = version->non_image ? 1 : 0;
return ret;
}
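// Same as LoadEXR(), but reads the EXR data from a memory buffer instead of a
// file.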
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor to remove code duplicated from LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
// TODO(syoyo): return the _wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
// Close the file before returning; otherwise the handle would leak.
fclose(fp);
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
const unsigned char* const* images,
const int* requested_pixel_types,
int compression_type,
int /*line_order*/,
int width, // for tiled : tile.width
int /*height*/, // for tiled : header.tile_size_y
int x_stride, // for tiled : header.tile_size_x
int line_no, // for tiled : 0
int num_lines, // for tiled : tile.height
size_t pixel_data_size,
const std::vector<ChannelInfo>& channels,
const std::vector<size_t>& channel_offset_list,
const void* compression_param = 0) // zfp compression param
{
size_t buf_size = static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
static_cast<size_t>(pixel_data_size);
//int last2bit = (buf_size & 3);
// buf_size must be multiple of four
//if(last2bit) buf_size += 4 - last2bit;
std::vector<unsigned char> buf(buf_size);
size_t start_y = static_cast<size_t>(line_no);
for (size_t c = 0; c < channels.size(); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(&f32.f);
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned short val = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
float val = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned int val = reinterpret_cast<const unsigned int * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
out_data.insert(out_data.end(), buf.begin(), buf.end());
} else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, width, num_lines);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
assert(0);
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
(void)compression_param;
assert(0);
#endif
} else {
assert(0);
return false;
}
return true;
}
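// Encodes every tile of one resolution level: each tile's pixel data is
// compressed with EncodePixelData() and prefixed with its
// (tileX, tileY, levelX, levelY, data size) chunk header.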
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
const std::vector<tinyexr::ChannelInfo>& channels,
std::vector<std::vector<unsigned char> >& data_list,
size_t start_index, // for data_list
int num_x_tiles, int num_y_tiles,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const void* compression_param, // must be set if zfp compression is enabled
std::string* err) {
int num_tiles = num_x_tiles * num_y_tiles;
assert(num_tiles == level_image->num_tiles);
if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
level_image->level_x == 0 && level_image->level_y == 0) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = tile_count++) < num_tiles) {
#else
// Use a signed int since some OpenMP compilers don't allow an unsigned type
// for `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_tiles; i++) {
#endif
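// The loop body below is shared by both execution paths: either a C++11
// thread pool pulling tile indices from an atomic counter, or a plain
// (optionally OpenMP-parallel) for loop over all tiles.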
size_t tile_idx = static_cast<size_t>(i);
size_t data_idx = tile_idx + start_index;
int x_tile = i % num_x_tiles;
int y_tile = i / num_x_tiles;
EXRTile& tile = level_image->tiles[tile_idx];
const unsigned char* const* images =
static_cast<const unsigned char* const*>(tile.images);
data_list[data_idx].resize(5*sizeof(int));
size_t data_header_size = data_list[data_idx].size();
bool ret = EncodePixelData(data_list[data_idx],
images,
exr_header->requested_pixel_types,
exr_header->compression_type,
0, // increasing y
tile.width,
exr_header->tile_size_y,
exr_header->tile_size_x,
0,
tile.height,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue;
}
assert(data_list[data_idx].size() > data_header_size);
int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
//tileX, tileY, levelX, levelY // pixel_data_size(int)
memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
return TINYEXR_SUCCESS;
}
static int NumScanlines(int compression_type) {
int num_scanlines = 1;
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
return num_scanlines;
}
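// Note: these block heights follow the OpenEXR chunk layout (NONE/RLE/ZIPS
// store 1 scanline per chunk, ZIP stores 16 and PIZ stores 32); ZFP is a
// tinyexr extension and reuses 16 scanlines per chunk here.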
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
const std::vector<ChannelInfo>& channels,
int num_blocks,
tinyexr_uint64 chunk_offset, // starting offset of current chunk
bool is_multipart,
OffsetData& offset_data, // output block offsets, must be initialized
std::vector<std::vector<unsigned char> >& data_list, // output
tinyexr_uint64& total_size, // output: ending offset of current chunk
std::string* err) {
int num_scanlines = NumScanlines(exr_header->compression_type);
data_list.resize(num_blocks);
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
{
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
}
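// At this point pixel_data_size is the total number of bytes one pixel
// occupies across all channels, and channel_offset_list[c] is the running
// byte offset of channel c within that per-pixel total.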
const void* compression_param = 0;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use the ZFP compression parameter from custom attributes (if such a
// parameter exists)
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
compression_param = &zfp_compression_param;
}
#endif
tinyexr_uint64 offset = chunk_offset;
tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
if (exr_image->tiles) {
const EXRImage* level_image = exr_image;
size_t block_idx = 0;
tinyexr::tinyexr_uint64 block_data_size = 0;
int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
if (!level_image) {
if (err) {
(*err) += "Invalid number of tiled levels for EncodeChunk\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
exr_header->tile_level_mode, offset_data.num_x_levels);
if (level_index_from_image != level_index) {
if (err) {
(*err) += "Incorrect level ordering in tiled image\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
std::string e;
int ret = EncodeTiledLevel(level_image,
exr_header,
channels,
data_list,
block_idx,
num_x_tiles,
num_y_tiles,
channel_offset_list,
pixel_data_size,
compression_param,
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty() && err) {
(*err) += e;
}
return ret;
}
for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
offset_data.offsets[level_index][j][i] = offset;
swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
offset += data_list[block_idx].size() + doffset;
block_data_size += data_list[block_idx].size();
++block_idx;
}
level_image = level_image->next_level;
}
assert(static_cast<int>(block_idx) == num_blocks);
total_size = offset;
} else { // scanlines
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
std::vector<std::thread> workers;
std::atomic<int> block_count(0);
int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = block_count++) < num_blocks) {
#else
bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
#endif
int start_y = num_scanlines * i;
int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
int num_lines = end_Y - start_y;
const unsigned char* const* images =
static_cast<const unsigned char* const*>(exr_image->images);
data_list[i].resize(2*sizeof(int));
size_t data_header_size = data_list[i].size();
bool ret = EncodePixelData(data_list[i],
images,
exr_header->requested_pixel_types,
exr_header->compression_type,
0, // increasing y
exr_image->width,
exr_image->height,
exr_image->width,
start_y,
num_lines,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue; // "break" cannot be used with OpenMP
}
assert(data_list[i].size() > data_header_size);
int data_len = static_cast<int>(data_list[i].size() - data_header_size);
memcpy(&data_list[i][0], &start_y, sizeof(int));
memcpy(&data_list[i][4], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[i][0]));
swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode scanline data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size() + doffset;
}
total_size = static_cast<size_t>(offset);
}
return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
unsigned char** memory_out, const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory_out == NULL) {
SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
{
for (unsigned int i = 0; i < num_parts; ++i) {
if (exr_headers[i]->compression_type < 0) {
SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#else
for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
}
}
std::vector<unsigned char> memory;
// Header
{
const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
memory.insert(memory.end(), header, header + 4);
}
// Version
// using value from the first header
int long_name = exr_headers[0]->long_name;
{
char marker[] = { 2, 0, 0, 0 };
/* @todo
if (exr_header->non_image) {
marker[1] |= 0x8;
}
*/
// tiled
if (num_parts == 1 && exr_images[0].tiles) {
marker[1] |= 0x2;
}
// long_name
if (long_name) {
marker[1] |= 0x4;
}
// multipart
if (num_parts > 1) {
marker[1] |= 0x10;
}
memory.insert(memory.end(), marker, marker + 4);
}
int total_chunk_count = 0;
std::vector<int> chunk_count(num_parts);
std::vector<OffsetData> offset_data(num_parts);
for (unsigned int i = 0; i < num_parts; ++i) {
if (!exr_images[i].tiles) {
int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
chunk_count[i] =
(exr_images[i].height + num_scanlines - 1) / num_scanlines;
InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
total_chunk_count += chunk_count[i];
} else {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
chunk_count[i] =
InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
total_chunk_count += chunk_count[i];
}
}
}
// Write attributes to memory buffer.
std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
{
std::set<std::string> partnames;
for (unsigned int i = 0; i < num_parts; ++i) {
//channels
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_headers[i]->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_headers[i]->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_headers[i]->channels[c].name);
channels[i].push_back(info);
}
tinyexr::WriteChannelInfo(data, channels[i]);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_headers[i]->compression_type;
swap4(&comp);
WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char*>(&comp), 1);
}
{
int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
swap4(&data[0]);
swap4(&data[1]);
swap4(&data[2]);
swap4(&data[3]);
WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
swap4(&data0[0]);
swap4(&data0[1]);
swap4(&data0[2]);
swap4(&data0[3]);
// Note: must be the same across parts (currently, using value from the first header)
WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
// Note: must be the same across parts
float aspectRatio = 1.0f;
swap4(&aspectRatio);
WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
}
{
float center[2] = { 0.0f, 0.0f };
swap4(¢er[0]);
swap4(¢er[1]);
WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
}
{
float w = 1.0f;
swap4(&w);
WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char*>(&w),
sizeof(float));
}
if (exr_images[i].tiles) {
unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
//unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
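// 'tiledesc' attribute layout: xSize (4 bytes), ySize (4 bytes), mode
// (1 byte), where mode packs the level mode in the low bits and the
// rounding mode as levelMode + roundingMode * 16 (the (1u << 4u) bit below).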
unsigned int datai[3] = { 0, 0, 0 };
unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
data[8] = tile_mode;
swap4(reinterpret_cast<unsigned int*>(&data[0]));
swap4(reinterpret_cast<unsigned int*>(&data[4]));
WriteAttributeToMemory(
&memory, "tiles", "tiledesc",
reinterpret_cast<const unsigned char*>(data), 9);
}
// must be present for multi-part files - according to spec.
if (num_parts > 1) {
// name
{
size_t len = 0;
if ((len = strlen(exr_headers[i]->name)) > 0) {
partnames.insert(std::string(exr_headers[i]->name));
if (partnames.size() != i + 1) {
SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
return 0;
}
WriteAttributeToMemory(
&memory, "name", "string",
reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
static_cast<int>(len));
} else {
SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
return 0;
}
}
// type
{
const char* type = "scanlineimage";
if (exr_images[i].tiles) type = "tiledimage";
WriteAttributeToMemory(
&memory, "type", "string",
reinterpret_cast<const unsigned char*>(type),
static_cast<int>(strlen(type)));
}
// chunkCount
{
WriteAttributeToMemory(
&memory, "chunkCount", "int",
reinterpret_cast<const unsigned char*>(&chunk_count[i]),
4);
}
}
// Custom attributes
if (exr_headers[i]->num_custom_attributes > 0) {
for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_headers[i]->custom_attributes[j].name,
exr_headers[i]->custom_attributes[j].type,
reinterpret_cast<const unsigned char*>(
exr_headers[i]->custom_attributes[j].value),
exr_headers[i]->custom_attributes[j].size);
}
}
{ // end of header
memory.push_back(0);
}
}
}
if (num_parts > 1) {
// end of header list
memory.push_back(0);
}
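// Layout from here on: one 8-byte offset-table entry per chunk (for every
// part), followed by the chunk data itself, so the first chunk starts right
// after the offset tables.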
tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
tinyexr_uint64 total_size = 0;
std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
for (unsigned int i = 0; i < num_parts; ++i) {
std::string e;
int ret = EncodeChunk(&exr_images[i], exr_headers[i],
channels[i],
chunk_count[i],
// starting offset of current chunk after part-number
chunk_offset,
num_parts > 1,
offset_data[i], // output: block offsets, must be initialized
data_lists[i], // output
total_size, // output
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return 0;
}
chunk_offset = total_size;
}
// Allocating required memory
if (total_size == 0) { // something went wrong
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char*>(malloc(total_size));
// Writing header
memcpy((*memory_out), &memory[0], memory.size());
unsigned char* memory_ptr = *memory_out + memory.size();
size_t sum = memory.size();
// Writing offset data for chunks
for (unsigned int i = 0; i < num_parts; ++i) {
if (exr_images[i].tiles) {
const EXRImage* level_image = &exr_images[i];
int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
sum += num_bytes;
assert(sum <= total_size);
memcpy(memory_ptr,
reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
num_bytes);
memory_ptr += num_bytes;
}
level_image = level_image->next_level;
}
} else {
size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
sum += num_bytes;
assert(sum <= total_size);
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
memory_ptr += num_bytes;
}
}
// Writing chunk data
for (unsigned int i = 0; i < num_parts; ++i) {
for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
if (num_parts > 1) {
sum += 4;
assert(sum <= total_size);
unsigned int part_number = i;
swap4(&part_number);
memcpy(memory_ptr, &part_number, 4);
memory_ptr += 4;
}
sum += data_lists[i][j].size();
assert(sum <= total_size);
memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
memory_ptr += data_lists[i][j].size();
}
}
assert(sum == total_size);
return total_size; // OK
}
} // tinyexr
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
const EXRHeader* exr_header,
unsigned char** memory_out, const char** err) {
return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err);
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || exr_header == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if (mem_size == 0) {
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
unsigned char** memory_out, const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
memory_out == NULL) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err);
}
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
const char* filename,
const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
if (mem_size == 0) {
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 8, 0, 0] (deep bit set)
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(&dx);
tinyexr::swap4(&dy);
tinyexr::swap4(&dw);
tinyexr::swap4(&dh);
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(&x);
tinyexr::swap4(&y);
tinyexr::swap4(&w);
tinyexr::swap4(&h);
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(&line_no);
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
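// Minimal usage sketch for LoadDeepEXR (illustrative only; the file name is
// a placeholder and error handling is trimmed):
//
//   DeepImage deep;
//   memset(&deep, 0, sizeof(deep)); // zero-init as a precaution
//   const char *err = NULL;
//   if (LoadDeepEXR(&deep, "input_deep.exr", &err) == TINYEXR_SUCCESS) {
//     // deep.image[channel][y][sample], deep.offset_table[y][x],
//     // deep.channel_names[channel] and deep.width/height/num_channels are
//     // now populated; the caller owns the malloc()ed buffers.
//   } else if (err) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }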
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->next_level = NULL;
exr_image->level_x = 0;
exr_image->level_y = 0;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
EXRSetNameAttr(exr_header, NULL);
return TINYEXR_SUCCESS;
}
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
if (exr_header == NULL) {
return;
}
memset(exr_header->name, 0, 256);
if (name != NULL) {
size_t len = std::min(strlen(name), (size_t)255);
if (len) {
memcpy(exr_header->name, name, len);
}
}
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_image->next_level) {
FreeEXRImage(exr_image->next_level);
delete exr_image->next_level;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
marker_size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
memset(exr_header, 0, sizeof(EXRHeader));
ConvertHeader(exr_header, infos[i]);
exr_header->multipart = exr_version->multipart ? 1 : 0;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (err != 0) {
// TODO(syoyo): return the _wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In a multipart image, there is a 'part number' before each chunk of data.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// EXR spec says 'part number' is 'unsigned long' but actually this is
// 'unsigned int(4 bytes)' in OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<tinyexr::OffsetData> chunk_offset_table_list;
chunk_offset_table_list.reserve(num_parts);
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
} else {
{
std::vector<int> num_x_tiles, num_y_tiles;
tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
if (num_blocks != exr_headers[i]->chunk_count) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number'
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
}
}
}
}
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
// First check that the 'part number' stored with each chunk matches 'i'
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
const unsigned char *part_number_addr =
memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
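// Typical multi-part load flow using the functions above (illustrative
// sketch; error handling trimmed, "multi.exr" is a placeholder):
//
//   EXRVersion version;
//   ParseEXRVersionFromFile(&version, "multi.exr");
//   EXRHeader **headers = NULL; int num_parts = 0; const char *err = NULL;
//   ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
//                                   "multi.exr", &err);
//   std::vector<EXRImage> images(static_cast<size_t>(num_parts));
//   for (int i = 0; i < num_parts; i++) InitEXRImage(&images[size_t(i)]);
//   LoadEXRMultipartImageFromFile(&images[0],
//                                 (const EXRHeader **)headers,
//                                 (unsigned int)num_parts, "multi.exr", &err);
//   for (int i = 0; i < num_parts; i++) {
//     FreeEXRImage(&images[size_t(i)]);
//     FreeEXRHeader(headers[i]);
//     free(headers[i]); // header structs are malloc()ed by the parser
//   }
//   free(headers);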
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || components == 3 || components == 4) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
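// Minimal usage sketch for SaveEXR (illustrative only; `rgba` is assumed to
// hold width * height * 4 floats in RGBA order):
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4, /* save_as_fp16 */ 1,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "SaveEXR failed: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }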
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
dqp3.c | /*! @copyright (c) 2017 King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
*
* STARS-H is a software package, provided by King Abdullah
* University of Science and Technology (KAUST)
*
* @file src/backends/mpi/blrm/dqp3.c
* @version 1.3.0
* @author Aleksandr Mikhalev
* @date 2017-11-07
* */
#include "common.h"
#include "starsh.h"
#include "starsh-mpi.h"
int starsh_blrm__dqp3_mpi(STARSH_blrm **matrix, STARSH_blrf *format,
int maxrank, double tol, int onfly)
//! Approximate each far-field block of a BLR matrix with RRQR (GEQP3).
/*!
* @param[out] matrix: Address of pointer to @ref STARSH_blrm object.
* @param[in] format: Block low-rank format.
* @param[in] maxrank: Maximum possible rank.
* @param[in] tol: Relative error tolerance.
* @param[in] onfly: Whether to skip storing dense near-field blocks.
* @return Error code @ref STARSH_ERRNO.
* @ingroup blrm
* */
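/* Outline: every far-field block assigned to this MPI rank is approximated
 * independently with rank-revealing QR (GEQP3, via starsh_dense_dlrqp3).
 * Blocks whose rank would exceed `maxrank` are reported with rank -1
 * ("false" far-field blocks); their indices are gathered across all MPI
 * ranks and such blocks are moved to the near-field (dense) list before the
 * block low-rank format is rebuilt. */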
{
STARSH_blrf *F = format;
STARSH_problem *P = F->problem;
STARSH_kernel *kernel = P->kernel;
STARSH_int nblocks_far = F->nblocks_far;
STARSH_int nblocks_near = F->nblocks_near;
STARSH_int nblocks_far_local = F->nblocks_far_local;
STARSH_int nblocks_near_local = F->nblocks_near_local;
// Shortcuts to information about clusters
STARSH_cluster *RC = F->row_cluster;
STARSH_cluster *CC = F->col_cluster;
void *RD = RC->data, *CD = CC->data;
// The following values default to those of the given block low-rank format F,
// but are changed when false far-field blocks are found.
STARSH_int new_nblocks_far = F->nblocks_far;
STARSH_int new_nblocks_near = F->nblocks_near;
STARSH_int new_nblocks_far_local = F->nblocks_far_local;
STARSH_int new_nblocks_near_local = F->nblocks_near_local;
STARSH_int *block_far = F->block_far;
STARSH_int *block_near = F->block_near;
STARSH_int *block_far_local = F->block_far_local;
STARSH_int *block_near_local = F->block_near_local;
// Places to store low-rank factors, dense blocks and ranks
Array **far_U = NULL, **far_V = NULL, **near_D = NULL;
int *far_rank = NULL;
double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL;
size_t offset_U = 0, offset_V = 0, offset_D = 0;
STARSH_int lbi, lbj, bi, bj = 0;
double drsdd_time = 0, kernel_time = 0;
const int oversample = starsh_params.oversample;
// Init buffers to store low-rank factors of far-field blocks if needed
if(nblocks_far > 0)
{
STARSH_MALLOC(far_U, nblocks_far_local);
STARSH_MALLOC(far_V, nblocks_far_local);
STARSH_MALLOC(far_rank, nblocks_far_local);
size_t size_U = 0, size_V = 0;
// Simple cycle over all far-field blocks
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
STARSH_int bi = block_far_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Accumulate sizes of the corresponding block row and block column
size_U += RC->size[i];
size_V += CC->size[j];
}
size_U *= maxrank;
size_V *= maxrank;
STARSH_MALLOC(alloc_U, size_U);
STARSH_MALLOC(alloc_V, size_V);
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
STARSH_int bi = block_far_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get corresponding sizes
size_t nrows = RC->size[i], ncols = CC->size[j];
int shape_U[] = {nrows, maxrank};
int shape_V[] = {ncols, maxrank};
double *U = alloc_U+offset_U, *V = alloc_V+offset_V;
offset_U += nrows*maxrank;
offset_V += ncols*maxrank;
array_from_buffer(far_U+lbi, 2, shape_U, 'd', 'F', U);
array_from_buffer(far_V+lbi, 2, shape_V, 'd', 'F', V);
}
offset_U = 0;
offset_V = 0;
}
// Work variables
int info;
// Simple cycle over all far-field admissible blocks
#pragma omp parallel for schedule(dynamic, 1)
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
STARSH_int bi = block_far_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get corresponding sizes and minimum of them
int nrows = RC->size[i];
int ncols = CC->size[j];
int mn = nrows < ncols ? nrows : ncols;
int mn2 = maxrank+oversample;
if(mn2 > mn)
mn2 = mn;
// Get size of temporary arrays
int lwork = 3*ncols+1, lwork_sdd = (4*(size_t)mn2+7)*mn2;
if(lwork_sdd > lwork)
lwork = lwork_sdd;
lwork += (size_t)mn2*(2*ncols+mn2+1)+mn;
int liwork = ncols, liwork_sdd = 8*mn2;
if(liwork_sdd > liwork)
liwork = liwork_sdd;
double *D, *work;
int *iwork;
int info;
// Allocate temporary arrays
STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);
STARSH_PMALLOC(iwork, liwork, info);
STARSH_PMALLOC(work, lwork, info);
// Compute elements of a block
#ifdef OPENMP
double time0 = omp_get_wtime();
#endif
kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],
RD, CD, D, nrows);
#ifdef OPENMP
double time1 = omp_get_wtime();
#endif
starsh_dense_dlrqp3(nrows, ncols, D, nrows, far_U[lbi]->data, nrows,
far_V[lbi]->data, ncols, far_rank+lbi, maxrank, oversample,
tol, work, lwork, iwork);
#ifdef OPENMP
double time2 = omp_get_wtime();
#pragma omp critical
{
drsdd_time += time2-time1;
kernel_time += time1-time0;
}
#endif
// Free temporary arrays
free(D);
free(work);
free(iwork);
}
// Get number of false far-field blocks
STARSH_int nblocks_false_far_local = 0;
STARSH_int *false_far_local = NULL;
for(lbi = 0; lbi < nblocks_far_local; lbi++)
if(far_rank[lbi] == -1)
nblocks_false_far_local++;
if(nblocks_false_far_local > 0)
{
// IMPORTANT: `false_far` and `false_far_local` must be in
// ascending order for later code to work normally
STARSH_MALLOC(false_far_local, nblocks_false_far_local);
lbj = 0;
for(lbi = 0; lbi < nblocks_far_local; lbi++)
if(far_rank[lbi] == -1)
false_far_local[lbj++] = block_far_local[lbi];
}
// Sync list of all false far-field blocks
STARSH_int nblocks_false_far = 0;
int int_nblocks_false_far_local = nblocks_false_far_local;
int *mpi_recvcount, *mpi_offset;
int mpi_size, mpi_rank;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
STARSH_MALLOC(mpi_recvcount, mpi_size);
STARSH_MALLOC(mpi_offset, mpi_size);
MPI_Allgather(&int_nblocks_false_far_local, 1, MPI_INT, mpi_recvcount,
1, MPI_INT, MPI_COMM_WORLD);
for(bi = 0; bi < mpi_size; bi++)
nblocks_false_far += mpi_recvcount[bi];
mpi_offset[0] = 0;
for(bi = 1; bi < mpi_size; bi++)
mpi_offset[bi] = mpi_offset[bi-1]+mpi_recvcount[bi-1];
STARSH_int *false_far = NULL;
if(nblocks_false_far > 0)
STARSH_MALLOC(false_far, nblocks_false_far);
MPI_Allgatherv(false_far_local, nblocks_false_far_local, my_MPI_SIZE_T,
false_far, mpi_recvcount, mpi_offset, my_MPI_SIZE_T,
MPI_COMM_WORLD);
free(mpi_recvcount);
free(mpi_offset);
// Make false_far be in ascending order
qsort(false_far, nblocks_false_far, sizeof(*false_far), cmp_size_t);
if(nblocks_false_far > 0)
{
// Update list of near-field blocks
new_nblocks_near = nblocks_near+nblocks_false_far;
new_nblocks_near_local = nblocks_near_local+nblocks_false_far_local;
STARSH_MALLOC(block_near, 2*new_nblocks_near);
if(new_nblocks_near_local > 0)
STARSH_MALLOC(block_near_local, new_nblocks_near_local);
// At first get all near-field blocks, assumed to be dense
#pragma omp parallel for schedule(static)
for(bi = 0; bi < 2*nblocks_near; bi++)
block_near[bi] = F->block_near[bi];
#pragma omp parallel for schedule(static)
for(lbi = 0; lbi < nblocks_near_local; lbi++)
block_near_local[lbi] = F->block_near_local[lbi];
// Add false far-field blocks
#pragma omp parallel for schedule(static)
for(bi = 0; bi < nblocks_false_far; bi++)
{
STARSH_int bj = false_far[bi];
block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];
block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];
}
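        // Map each local false far-field block onto the tail of the merged
        // near-field list: false_far is sorted, so advance bi to the block's
        // position in it and record nblocks_near+bi as the new local index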
bi = 0;
for(lbi = 0; lbi < nblocks_false_far_local; lbi++)
{
lbj = false_far_local[lbi];
while(bi < nblocks_false_far && false_far[bi] < lbj)
bi++;
block_near_local[nblocks_near_local+lbi] = nblocks_near+bi;
}
// Update list of far-field blocks
new_nblocks_far = nblocks_far-nblocks_false_far;
new_nblocks_far_local = nblocks_far_local-nblocks_false_far_local;
if(new_nblocks_far > 0)
{
STARSH_MALLOC(block_far, 2*new_nblocks_far);
if(new_nblocks_far_local > 0)
STARSH_MALLOC(block_far_local, new_nblocks_far_local);
bj = 0;
lbi = 0;
lbj = 0;
for(bi = 0; bi < nblocks_far; bi++)
{
// `false_far` must be in ascending order for this to work
if(bj < nblocks_false_far && false_far[bj] == bi)
{
if(nblocks_false_far_local > lbj &&
false_far_local[lbj] == bi)
{
lbi++;
lbj++;
}
bj++;
}
else
{
block_far[2*(bi-bj)] = F->block_far[2*bi];
block_far[2*(bi-bj)+1] = F->block_far[2*bi+1];
if(nblocks_far_local > lbi &&
F->block_far_local[lbi] == bi)
{
block_far_local[lbi-lbj] = bi-bj;
lbi++;
}
}
}
}
// Update format by creating new format
STARSH_blrf *F2;
info = starsh_blrf_new_from_coo_mpi(&F2, P, F->symm, RC, CC,
new_nblocks_far, block_far, new_nblocks_far_local,
block_far_local, new_nblocks_near, block_near,
new_nblocks_near_local, block_near_local, F->type);
// Swap internal data of formats and free unnecessary data
STARSH_blrf tmp_blrf = *F;
*F = *F2;
*F2 = tmp_blrf;
if(mpi_rank == 0)
STARSH_WARNING("`F` was modified due to false far-field blocks");
starsh_blrf_free(F2);
}
// Compute near-field blocks if needed
if(onfly == 0 && new_nblocks_near > 0)
{
STARSH_MALLOC(near_D, new_nblocks_near_local);
size_t size_D = 0;
// Simple cycle over all near-field blocks
for(lbi = 0; lbi < new_nblocks_near_local; lbi++)
{
STARSH_int bi = block_near_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_near[2*bi];
STARSH_int j = block_near[2*bi+1];
            // Get corresponding block row and block column sizes
size_t nrows = RC->size[i];
size_t ncols = CC->size[j];
// Update size_D
size_D += nrows*ncols;
}
STARSH_MALLOC(alloc_D, size_D);
// For each near-field block compute its elements
#pragma omp parallel for schedule(dynamic, 1)
for(lbi = 0; lbi < new_nblocks_near_local; lbi++)
{
STARSH_int bi = block_near_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_near[2*bi];
STARSH_int j = block_near[2*bi+1];
            // Get corresponding block row and block column sizes
int nrows = RC->size[i];
int ncols = CC->size[j];
int shape[2] = {nrows, ncols};
double *D;
#pragma omp critical
{
D = alloc_D+offset_D;
offset_D += nrows*ncols;
//array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);
//offset_D += near_D[lbi]->size;
}
array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);
#ifdef OPENMP
double time0 = omp_get_wtime();
#endif
kernel(nrows, ncols, RC->pivot+RC->start[i],
CC->pivot+CC->start[j], RD, CD, D, nrows);
#ifdef OPENMP
double time1 = omp_get_wtime();
#pragma omp critical
kernel_time += time1-time0;
#endif
}
}
// Change sizes of far_rank, far_U and far_V if there were false
// far-field blocks
lbj = 0;
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
if(far_rank[lbi] == -1)
lbj++;
else
{
int shape_U[2] = {far_U[lbi]->shape[0], far_rank[lbi]};
int shape_V[2] = {far_V[lbi]->shape[0], far_rank[lbi]};
array_from_buffer(far_U+lbi-lbj, 2, shape_U, 'd', 'F',
far_U[lbi]->data);
array_from_buffer(far_V+lbi-lbj, 2, shape_V, 'd', 'F',
far_V[lbi]->data);
far_rank[lbi-lbj] = far_rank[lbi];
}
}
if(nblocks_false_far_local > 0 && new_nblocks_far_local > 0)
{
STARSH_REALLOC(far_rank, new_nblocks_far_local);
STARSH_REALLOC(far_U, new_nblocks_far_local);
STARSH_REALLOC(far_V, new_nblocks_far_local);
}
// If all far-field blocks are false, then dealloc buffers
if(new_nblocks_far_local == 0 && nblocks_far_local > 0)
{
block_far = NULL;
free(far_rank);
far_rank = NULL;
free(far_U);
far_U = NULL;
free(far_V);
far_V = NULL;
free(alloc_U);
alloc_U = NULL;
free(alloc_V);
alloc_V = NULL;
}
// Dealloc list of false far-field blocks if it is not empty
if(nblocks_false_far > 0)
free(false_far);
if(nblocks_false_far_local > 0)
free(false_far_local);
// Finish with creating instance of Block Low-Rank Matrix with given
// buffers
#ifdef OPENMP
double mpi_drsdd_time = 0, mpi_kernel_time = 0;
MPI_Reduce(&drsdd_time, &mpi_drsdd_time, 1, MPI_DOUBLE, MPI_SUM, 0,
MPI_COMM_WORLD);
MPI_Reduce(&kernel_time, &mpi_kernel_time, 1, MPI_DOUBLE, MPI_SUM, 0,
MPI_COMM_WORLD);
if(mpi_rank == 0)
{
//STARSH_WARNING("DRSDD kernel total time: %e secs", mpi_drsdd_time);
//STARSH_WARNING("MATRIX kernel total time: %e secs", mpi_kernel_time);
}
#endif
return starsh_blrm_new_mpi(matrix, F, far_rank, far_U, far_V, onfly,
near_D, alloc_U, alloc_V, alloc_D, '1');
}
|
krb5_tgs_fmt_plug.c | /*
* Based on the work by Tim Medin
* Port from his Pythonscript to John by Michael Kramer (SySS GmbH)
*
* This software is
* Copyright (c) 2015 Michael Kramer <michael.kramer@uni-konstanz.de>,
* Copyright (c) 2015 magnum
* Copyright (c) 2016 Fist0urs <eddy.maaalou@gmail.com>
*
* Modified by Fist0urs to improve performances by proceeding known-plain
* attack, based on defined ASN1 structures (then got rid of RC4 rounds
* + hmac-md5)
*
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5tgs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5tgs);
#else
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#include "dyna_salt.h"
#include "rc4.h"
#include "md4.h"
#include "hmacmd5.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#define FORMAT_LABEL "krb5tgs"
#define FORMAT_NAME "Kerberos 5 TGS etype 23"
#define ALGORITHM_NAME "MD4 HMAC-MD5 RC4"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define MIN_PLAINTEXT_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_SIZE sizeof(struct custom_salt *)
#define SALT_ALIGN sizeof(struct custom_salt *)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/*
assuming checksum == edata1
formats are:
checksum$edata2
$krb5tgs$23$checksum$edata2
$krb5tgs$23$*user*realm*spn*$checksum$edata2
*/
static struct fmt_tests tests[] = {
{"74809c4c83c3c8279c6058d2f206ec2f$78b4bbd4d229487d5afc9a6050d4144ce10e9245cdfc0df542879814ce740cebb970ee820677041596d7e55836a18cc95c04169e7c74a4a22ae94e66f3d37150e26cc9cb99e189ef54feb7a40a8db2cb2c41db80d8927c74da7b33b52c58742d2109036b8ab27184609e7adff27b8f17b2f2a7b7d85e4ad532d8a70d48685a4390a9fc7a0ab47fd17334534d795abf83462f0db3de931c6a2d5988ab5bf3253facfff1381afb192ce385511c9052f2915ffdb7ea28a1bbad0573d9071e79dc15068527d50100de8813793a15c292f145fa3797ba86f373a4f0a05e5f2ec7dbfd8c8b5139cc7fbb098ea1dd91a7440134ffe2aff7174d0df13dcad82c81c680a70127a3ec8792bdecd74a878f97ff2b21277dc8c9a2f7bbcd9f72560dd933d85585259067d45a46a6f505d03f188b62c37edf03f117503a26743ebd674d5b07324c15fc8418881613b365402e0176da97d43cf85e8239b69aee07791233a959bcaf83a7f492fa718dd0a1747eaf5ce626eb11bda89e8235a056e2721f45c3b61442d893ef32a8c192ea0dadb853f3c6f3c75e92f23c744605c6f55578f696b0f33a9586b8aae3e12e38a097692cd9a31d780d973eaaf62ef23b2fc9ae59a38bfd8ea14d3289b46910f61a90aa733e66382bc27f40ba634e55ef1bec0ca7f71546b79566d85664b92f9fae495fcef5cde4c4399a6798569a7e81b9cc4bdde7104f3fe181401f82bba944e3b0a406c7093c00ff9d5984a82517b1a64a8aa561bc1f0cbafbdbbc5654d375c91d4e485e17bb06838109fbc1504147481c91652f545086a84daa423a6286ea6bb13460c5ff3d865a7b37b9ce4e7b07fbe2f6897c12c1e4df2e875c1ec9cfbf84097a7f48b270baf3481263b21849ab93c231490d06a23461a5e00c23df76bca8e5a19256d859304e1f5752bf055ac7f4843e1ad174f1cbbf5c142958f9310025ce439d5979982fb0b8c2ea95e1a22ee8dc63423d9d364cb0b95bcdf89ec4ed485b9005326d728757d77aa3e020a4a61d7deb782bc5264dca350173609772cd6d003ee8104dd24d310c9a18a44f78e27d65095f5bb54f6118c8f0d79ad5a850cec8d40a19bd0134144e904c9eb7fdcff3293696071fc1118f6b2f934281a25bcd5ca7d567714b1e43bd6d09bfcc8744c0ca273a75938394ac2fb31957287093346078577c94a71dfa6ad4a63211f54f00ef7a9064d070aaff84116ee891728915c938a8a32e87aaa00ec18e2b4e9ae88f7e53f08d855052a995f92351be32d8df934eab487103b0f089828e5fb5f73af3a8a05b9fffd25c43a392994743de3de1a2a9b8bba27e02ae2341f09d63aafab291759c41b9635521ca02f08e21e7e5c3ce75c8c3515eaa99aeb9bf8e204663e8b6b8507ecf87230a131687b4770e250ba0ef29fa3ca3b47b34971e17c6a3ef785acdd7c90da9d2", "test123"},
{"$krb5tgs$23$ee09e292a05e3af870417758b1cdfd04$a1a480b8505d2f2f0ff06ddce40c2f6e76bd06fa64dcd5b0646a68effcd686b2e41562ebda90da7e7b36d95cd16ca8a33b8d99184d6b7fa7a2efec3a05dcb63b3e815ffd38849dc69174d1efb3a871544b73a6da55d2331bd4b60743d1654873e3c1748ce155c35a1711695296ab944d158e374b67f43dd07eab2bcacec1be480e5c1338e3834f7133909f5c7970ece39e73bd96d40f696cb5a8575e5e1feab937b616d6180cc3258e22b9fc495017593e15fc10e674af8184c282a0d80902ea9dabda5fb0a56d7980bfd4b62b330155cd8e318dc5be55500cb8ddd691b629af371463c411f1c11d21811e1546477b85f0a85e296f5df737930aff5015111d2f01a236ab7c77e9dab001f52400cccbcdb31bb180db027bd0fa2f6000dce7c1e072c0effbdee23a401720b1fe54a09a75430497f42f6e047d62d1123866d6ed37e58f8e2c1e462acb1a97a44a5ccef49897af190a46b3ab057d18c1e47d717c7a63658357d58d9cd5b7672f0a946f95f6e2ec3aee549e20e3b11237ea59f87723f24e03a6fac9e51086bc84142631ed36ee6855920f3d3d1e85d0faaa0a8b04a2b050b17f94d44af7f48302fa70dcf43279415983924e5d874c59722b6fb87ad1006fcb51e4341bb2cc4caf8c4b7993269af219cf4efa12b1009961c22f123c35f982e4ca75a97cd37f7f16be111ad301637ffb1664ccb021d3cf6bf771e07dc42202dac079c6bd7559f8e7a939bc14e9ddb45fe1b88c5f83b1ff966342bb9211afd15772cf5f871d39d0b30776d51d84b046df30d250c1877d146047e784c4bc2e6745f357dd0b1c6aaa11e26a0e3c2772781695f6a3bc536ba19e2327ec8c0866bd78d3b5b067abcf6991eafc8b7a11ad4049711263f3c68b358f246da1308d5a0daac1d7efedbc237be3d6a4bafe5ce66e941f7227d2b869bda637dfd223a4546340c59e7d0e2b58f60a67590468a53a5d28cc35cec36a9c5610c70c0633767539640b42cff787f4782057ff70d0e64658413429347f5449c1360da4d0827c4197bbb0361c6d0e04bcaf6bba1233912f806772146c0e778ac06749bbd3d8819007d070ae912580ff11a41f71b0907b88fb585585ebe42b4cc4ecde8ff7b49a856dd70f316425e53feff3ee6ca1e44d9ba5e607a41cf26edf44bffe2796f94ea2d767fbf81f665a7fedf0291e76c6fa409dc99c56954f21edc77f6173c5a3a909c8756f3cc5cc6c2d2e405f333ee0b50284aacfb81f9dfc6058b78b282db4511580eb623dc393919befc250d224490936e5fb16c483f4bd00c8915288d0ddf3812eaa3d46ad5a24c56390076730d23b2de6558ddadddba725f9b4a73d13de3e1276fc285194e3a2f613d9b020d0485d7e26b36b7b917f4911024127320627066fabbd465b4cd5d5fdebae804d15db0f5b276659364bec32a13a8d9e11349f54bd", "bluvshop2"},
{"$krb5tgs$23$*Fist0urs$MYDOMAIN$cifs/panda.com*$423cb47a258e5859c13381ae64de7464$8dd47d94e288a1b32af726d2eac33710fb1610e4c6f674907d7a74d26515a314173b2b531baa790b70467ebe538fc9e941bf4d7f7218a4ec17c1dc963b717d5837fcd5ae678189101a1b4831a53a1322ca6e8f5d644e4aa72e99bedb4a0e967c3e05ccdcc96137265612969a1214a71038dea845250cac45551963fe85f193d88aa39ed57b95b934295e17de04ebf0ad275df67f65fb1fc2ee3095c6af02c4c1b8efa570e1c2ac562601c5ac89bd6f59ca8b957660aa00787d4a0f9d9f29b15eb3b85823f7c9814eab9106210c37d863cf8413391c5941a994fdd52a44e4f8e8e4c9b8b520e62015fb5ed40e91e7a453b3ddcefb888fd896c187993a899b6a30d27a5b2b7847a410c0cce8b0fcf90367bfd8e6dfa7eb37676ecdf500c9a51ffb59792c13e222371e024f857134b7039931daa66a6902da37e71c41adf83846a9df1e75575696d7a6f1744d48e8215849773903c9475c29a1ec0fcc11257f9479467c2b65679a3da298e6806d619794dfc06b10b5e0a46e395c3ade3d750292f244cabb7172d83dbd42c6e3bd5a93a8c2d5fe84b23a3c60508733f5a087763f2fa514d18f891461b8ea22f7eaa847906182bd0415c28d197c06df8449cc2c6c2016c38672a67613a14ccac9025c4da85fc0825dcd9a1269e6064f80c0de445fbdd237d35ab0eb6ae468413c5b17c9955a8c8c34952c8a188bad7e5b18651a75b1c46cf116422378a94a19c31dfa634c8ab15f4f13e7e427741ab9e8f247b4a8fe2562986ee21f602b4fad45bd535718020b764da6f346e3b028db8a1af88419f3ea9141fcf0c622ed40d894814e5d60a9dcdfc8344f802c7b2f0089131e57ac0cc071af13c3b2b7302e9df4665c48b91f4ef0bb2a60a272e5841e0ee8da01a91773d41f295514b65ccb2190195f720d9838b3e7c701b51e813ef0262fbdbbe06391ba3fe4232e74523dfa933e6d3df2494ddd9f254afdf97623ceb5d32483a870cf72a57617bdbf97f0420c041edb5a884ff401dc21da0472d7a75d89dc9937fd65c3a422063ea44e3954435d38b8f34cec2c0360c8bef392f77fbab76a7b801e05b467d4980d20f0a7dbc1c39f50ce4429df1ec167c6be67d2fbd507a3f7b5d98cf214ae0510fac51e1075a06250d65a3a1179486bda5d982b7904682835079e3042f39a582492cd14dbafb5826e242c81998752043e2dd91b648f115900595f5191a01f187c4b6dea4917e4773a5fb28cb1d20508142a3905068c931a8c9a8fa291b92f8ece9884affd8787a5aa11858274879160e930587f3c32e2cabbd124c708641df09f82d05ab4db157ad24931dc36c616dbb778762ead6a8491ce8a48037106d382283ac69422c04af3ae2cbe22eff6dff21bc34b154a5fab666870c59aba65bd4e0ea0be3f394bb4901fd64a0e19293b8026188615c84601b7fecdb62b", "jlcirr."},
// 1-40a00000-john@HTTP-adc1.toor.com-TOOR.COM.kirbi file
{"$krb5tgs$23$e43ae991b15bbd11c19395c6c785f4d4$07ea84f4cf5ab2ad5a1a15c5776e7bc024d26451771e653c9cb0b87d8a5d73317f992913621a61039d143818585aee976b5273f53023d28a1da22c8a2f79e47956da4221bd10809fb777b4684cbbc102bda46dc816eb5a5315196f1b2cd47fee6ddc1adae753c96eefe77bf8e8e54e33489595f0c3cb47db9bef77438f666c15de4ee9893839c5280daebd81d476a00944f8282eed61af43578fc6f68dbb47ad9106ea1f58125355506016ccf997d35d8ccad169ba7eebe27e76d19188a227158172b405c7e053da1e3bafae4cd39594e7a03e7a96bdbc63a793fba6c26135d6d1789395f0155341e04f80097540ffb1f299f61960a34db3ea14b95b4633b7eea3a552140e7e42708009fdda3d1b42b3297142bfc036abd3d28f07ba1c8362e1c5b346f55af7214314a92fa412733825f55fe4a56b56859af00eb4f69cc7ad339b7bc8032ff1057be3e73c5533f4f546e599ecbf60305569c9b87b22971ef012ff92f582688b001ad23901dae743c46cae6603f7b6b88db78fcfd59997e8a1078f8a27e28a6628bc59d78674d9d16a6413da369ab58cb702dba01c710fbfed87f4665dfb3cc4a8f83ebf960435ae96973e699cd419324ddf115825c99890b2bb8e35ce0005a2adf95ce691b135358c63aa87088ed615c5a9667927e691bf7135677893abc41c038d25ff9091c14e3d1da85c7f0edaed32c9b3b56d2c473b2363b93aae5cc9b02db47e7a22a639a951e2edce7580f691c2ee0f8ebdfb02cdc6de8d1028e34085d1a69cdebb00a430b5ddce223bd1cc9c66b746a46584c098f051b97180ee8db4268a3a838504884df45227cac6fe9e73704877f04558c9640ac2ed33b3216b2e17805863a955285f4633407097f439d7063faeacdcee0d6d4e6c2adbe85df0e51eb3c08da1cedb4fa70ff74b2711a7294b499597c1f30c1dd3cc12751692311a16e22b3fa6af75eb0ace4170df497ba860445b1fc964771eafc034515918bb080a6d05ab1170708e6ce80bf9b00f808a2b814e89d0ac9b5d1a23686b48e99fdc50c71b5fef8a9bfc851e40bed59f69821109be0119151768e4d91b8b00c46b39af207ad4a2566ce7751ac124c3c5851cd1026052d34988272bf2851bd1a4536816a7635d83c1378b442eb04c15d5028763e0b189c8f45703c54d62aaea570c9e56b0e721d170cda74f91a4101c495fb565bb03f2ad635335c88db112dfb073bb4d1547de3214de5e371bfe9b440de3882f7b83593ca0fc60f4e6e2e3885b2a365a56b529904c74bc58ab38432f0dfbbd3f4d543f9d8685b0aa69aa807701e09e1253b6ed4948c7ceaaafdd0baed2663881d52a163101a5bb697a65b2bfcc54d0dd", "1qaz@WSX"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static unsigned char (*saved_K1)[16];
static int any_cracked, *cracked;
static size_t cracked_size;
static int new_keys;
static struct custom_salt {
dyna_salt dsalt;
unsigned char edata1[16];
uint32_t edata2len;
unsigned char* edata2;
} *cur_salt;
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char *ptr, *keeptr;
int i;
ptr = mem_alloc_tiny(strlen(ciphertext) + 12 + 1, MEM_ALIGN_NONE);
keeptr = ptr;
if (strncmp(ciphertext, "$krb5tgs$23$", 12) != 0) {
memcpy(ptr, "$krb5tgs$23$", 12);
ptr += 12;
}
for (i = 0; i < strlen(ciphertext) + 1; i++)
ptr[i] = tolower(ARCH_INDEX(ciphertext[i]));
return keeptr;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
char *ctcopy;
char *keeptr;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
if (strncmp(ciphertext, "$krb5tgs$23$", 12) == 0) {
/* handle 'chopped' .pot lines (they always have the tag!) */
if (ldr_isa_pot_source(ciphertext)) {
MEM_FREE(keeptr);
return 1;
}
ctcopy += 12;
if (ctcopy[0] == '*') { /* assume account's info provided */
ctcopy++;
p = strtokm(ctcopy, "*");
ctcopy += strlen(p) + 2; /* set after '$' */
goto edata;
}
if (ctcopy[0] == '$')
ctcopy++;
}
edata:
/* assume checksum */
if (((p = strtokm(ctcopy, "$")) == NULL) || strlen(p) != 32)
goto err;
/* assume edata2 following */
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_alloc_align(sizeof(*saved_key) *
self->params.max_keys_per_crypt,
MEM_ALIGN_CACHE);
saved_K1 = mem_alloc_align(sizeof(*saved_K1) *
self->params.max_keys_per_crypt,
MEM_ALIGN_CACHE);
any_cracked = 0;
cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
cracked = mem_calloc(cracked_size, 1);
}
static void done(void)
{
MEM_FREE(saved_K1);
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static void *get_salt(char *ciphertext)
{
int i;
static struct custom_salt cs;
char *p;
char *ctcopy;
char *keeptr;
static void *ptr;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
memset(&cs, 0, sizeof(cs));
cs.edata2 = NULL;
if (strncmp(ciphertext, "$krb5tgs$23$", 12) == 0) {
ctcopy += 12;
if (ctcopy[0] == '*') {
ctcopy++;
p = strtokm(ctcopy, "*");
ctcopy += strlen(p) + 2;
goto edata;
}
if (ctcopy[0]=='$')
ctcopy++;
}
edata:
if (((p = strtokm(ctcopy, "$")) != NULL) && strlen(p) == 32) { /* assume checksum */
for (i = 0; i < 16; i++) {
cs.edata1[i] =
atoi16[ARCH_INDEX(p[i * 2])] * 16 +
atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
/* skip '$' */
p += strlen(p) + 1;
/* retrieve non-constant length of edata2 */
for (i = 0; p[i] != '\0'; i++)
;
cs.edata2len = i/2;
cs.edata2 = (unsigned char*) mem_calloc_tiny(cs.edata2len + 1, sizeof(char));
for (i = 0; i < cs.edata2len; i++) { /* assume edata2 */
cs.edata2[i] =
atoi16[ARCH_INDEX(p[i * 2])] * 16 +
atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
}
MEM_FREE(keeptr);
/* following is used to fool dyna_salt stuff */
cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, edata1);
cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, edata1, edata2len, 0);
cs.dsalt.salt_alloc_needs_free = 0;
ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
memcpy(ptr, &cs, sizeof(struct custom_salt));
return (void *) &ptr;
}
static void set_salt(void *salt)
{
cur_salt = *(struct custom_salt**)salt;
}
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, strlen(key) + 1);
new_keys = 1;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
const unsigned char data[4] = {2, 0, 0, 0};
int index;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
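	/*
	 * Known-plain attack: derive K1 = HMAC-MD5(MD4(UTF-16LE(password)), msg-type)
	 * and K3 = HMAC-MD5(K1, checksum = edata1), decrypt only the first 32 bytes
	 * of edata2 with RC4(K3) and look for the expected ASN.1 markers; the full
	 * HMAC-MD5 checksum is verified only when that cheap test matches.
	 */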
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char K3[16];
#ifdef _MSC_VER
unsigned char ddata[65536];
#else
unsigned char ddata[cur_salt->edata2len + 1];
#endif
unsigned char checksum[16];
RC4_KEY rckey;
if (new_keys) {
MD4_CTX ctx;
unsigned char key[16];
UTF16 wkey[PLAINTEXT_LENGTH + 1];
int len;
len = enc_to_utf16(wkey, PLAINTEXT_LENGTH,
(UTF8*)saved_key[index],
strlen(saved_key[index]));
if (len <= 0) {
saved_key[index][-len] = 0;
len = strlen16(wkey);
}
MD4_Init(&ctx);
MD4_Update(&ctx, (char*)wkey, 2 * len);
MD4_Final(key, &ctx);
hmac_md5(key, data, 4, saved_K1[index]);
}
hmac_md5(saved_K1[index], cur_salt->edata1, 16, K3);
RC4_set_key(&rckey, 16, K3);
RC4(&rckey, 32, cur_salt->edata2, ddata);
/*
8 first bytes are nonce, then ASN1 structures
(DER encoding: type-length-data)
if length >= 128 bytes:
length is on 2 bytes and type is
\x63\x82 (encode_krb5_enc_tkt_part)
and data is an ASN1 sequence \x30\x82
else:
length is on 1 byte and type is \x63\x81
and data is an ASN1 sequence \x30\x81
next headers follow the same ASN1 "type-length-data" scheme
*/
if (((!memcmp(ddata + 8, "\x63\x82", 2)) && (!memcmp(ddata + 16, "\xA0\x07\x03\x05", 4)))
||
((!memcmp(ddata + 8, "\x63\x81", 2)) && (!memcmp(ddata + 16, "\x03\x05\x00", 3)))) {
/* check the checksum to be sure */
RC4(&rckey, cur_salt->edata2len - 32, cur_salt->edata2 + 32, ddata + 32);
hmac_md5(saved_K1[index], ddata, cur_salt->edata2len, checksum);
if (!memcmp(checksum, cur_salt->edata1, 16)) {
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
new_keys = 0;
return *pcount;
}
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return cracked[index];
}
struct fmt_main fmt_krb5tgs = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
MIN_PLAINTEXT_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT,
{NULL},
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
fmt_default_binary,
get_salt,
{NULL},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
task5.h | #pragma once
#include <iostream>
#include <string>
#include <random>
#include <algorithm>
#include <vector>
#include <chrono>
#include <cstdlib>
#include <iterator>
#include <omp.h>
std::string randomString(const int& size, const int& seed)
{
std::string result;
const std::string VALID_CHARS = "a b c d e f ghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789,.-";
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(0, VALID_CHARS.size() - 1);
std::generate_n(std::back_inserter(result), size, [&]() {
return VALID_CHARS[distribution(generator)];
});
return result;
}
std::string randomSubstr(const std::string& s, const int& seed)
{
srand(seed);
const auto from = rand() % s.length();
const auto to = rand() % (s.length() - from) + from;
	return s.substr(from, to - from); // substr takes a length, not an end index
}
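// Returns true iff pattern t occurs in s starting at position pos (naive character-by-character check).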
inline bool maybeMatch(const std::string& s, const std::string& t, int pos)
{
const auto tEnd = t.length() + pos;
if (tEnd > s.length())
{
return false;
}
for (auto i = pos; i < tEnd; ++i)
{
if (s[i] != t[i - pos])
{
return false;
}
}
return true;
}
std::vector<int> search(const std::string& s, const std::string& t)
{
auto results = std::vector<int>();
const int end = s.length();
for (auto i = 0; i < end; ++i)
{
if (s[i] == t[0])
{
			const auto out = maybeMatch(s, t, i);
			if (out) // maybeMatch returns bool, so test it directly
			{
				results.push_back(i);
			}
}
}
return results;
}
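// Parallel variant of search(): positions are tested concurrently and matches are
// appended under a critical section, so the order of results is nondeterministic.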
std::vector<int> searchP(const std::string& s, const std::string& t)
{
auto results = std::vector<int>();
const int end = s.length();
#pragma omp parallel for
for (auto i = 0; i < end; ++i)
{
if (s[i] == t[0])
{
			const auto out = maybeMatch(s, t, i);
			if (out)
			{
#pragma omp critical
				results.push_back(i);
			}
}
}
return results;
}
int task5(int seed = 0, int threads = 2)
{
std::cout << "\n---" << seed << "---" << threads << "---\n\n";
srand(seed);
auto begin = std::chrono::system_clock::now(), end = std::chrono::system_clock::now();
double tSeq = 0;
double tPar = 0;
std::vector<int> res;
omp_set_num_threads(threads);
for (auto i = 0; i < 20; ++i)
{
const int iterSeed = rand();
std::cout << "\r" << i + 1 << std::flush;
const auto s = randomString((rand() % 40000000) + 4 * 1e7, iterSeed);
const auto t = randomSubstr(s, iterSeed);
begin = std::chrono::system_clock::now();
for (auto r = 0; r < 5; ++r)
{
res = searchP(s, t);
}
end = std::chrono::system_clock::now();
tPar += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
/*
begin = std::chrono::system_clock::now();
for (auto r = 0; r < 5; ++r)
{
res = search(s, t);
}
end = std::chrono::system_clock::now();
tSeq += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
*/
}
std::cout << "\r"
// << "S:" << tSeq / 1000 << "\n"
<< "P:" << tPar / 1000 << std::endl;
if (seed == 0) {
return task5(777, threads);
}
if (seed == 777) {
return task5(421512, threads);
}
if (seed == 421512) {
if (threads == 2) {
return task5(0, 4);
}
if (threads == 4) {
return task5(0, 6);
}
if (threads == 6) {
return task5(0, 8);
}
if (threads == 8) {
return task5(0, 12);
}
	}
	return 0;
} |
MS_Hybrid_dynamic.c | /*
 MPI + OpenMP dynamic Mandelbrot set renderer
*/
#include <stdlib.h>
#include <mpi.h>
#include <X11/Xlib.h>
#include <stdio.h>
#include <omp.h>
#include <sys/time.h>
#include <unistd.h>
#define data_tag 100
#define result_tag 101
#define terminate_tag 110
struct timeval tv1, tv2;
double t = 0;
typedef struct complextype
{
double real, imag;
} Compl;
int main(int argc, char *argv[])
{
// ----------input---------------
int size, rank;
int number_thread = atoi(argv[1]);
double leftR = atof(argv[2]);
double rightR = atof(argv[3]);
double lowerR = atof(argv[4]);
double upperR = atof(argv[5]);
int width = atoi(argv[6]);
int height = atoi(argv[7]);
char *en = argv[8];
int num_r;
int dis;
if( *en == 'e')dis = 1;
else dis = 0;
int q[24]={0};
// ------------MPI initial----------------
MPI_Status status;
MPI_Init(&argc,&argv);
MPI_Comm_size (MPI_COMM_WORLD, &size);
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
MPI_Barrier(MPI_COMM_WORLD);
if(rank == 0)gettimeofday(&tv1, NULL);
double t_start[24], total[24]={0};
double t=0;
double t_win = omp_get_wtime();
// ------- size =1
if(size == 1){
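		// Single process: render the whole image locally and parallelize the
		// pixel loop with OpenMP; no master/worker MPI protocol is needed.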
Display *display;
Window window; //initialization for a window
int screen; //which screen
/* open connection with the server */
GC gc;
XGCValues values;
long valuemask = 0;
if(dis == 1){
display = XOpenDisplay(NULL);
if(display == NULL) {
fprintf(stderr, "cannot open display\n");
exit(1);
}
screen = DefaultScreen(display);
/* set window position */
int x = 0;
int y = 0;
/* border width in pixels */
int border_width = 0;
/* create window */
window = XCreateSimpleWindow(display, RootWindow(display, screen), x, y, width, height, border_width,
BlackPixel(display, screen), WhitePixel(display, screen));
/* create graph */
gc = XCreateGC(display, window, valuemask, &values);
//XSetBackground (display, gc, WhitePixel (display, screen));
XSetForeground (display, gc, BlackPixel (display, screen));
XSetBackground(display, gc, 0X0000FF00);
XSetLineAttributes (display, gc, 1, LineSolid, CapRound, JoinRound);
/* map(show) the window */
XMapWindow(display, window);
XSync(display, 0);
}
/* draw points */
Compl z, c;
int repeats;
double temp, lengthsq;
int i, j;
omp_set_num_threads(number_thread);
#pragma omp parallel default(shared) private(j, z, c, repeats, lengthsq, temp)
{
#pragma omp for schedule(auto)
for(i=0; i<width; i++) {
for(j=0; j<height; j++) {
z.real = 0.0;
z.imag = 0.0;
c.real = leftR + (double)((double)i *((rightR-leftR)/(double)width));
c.imag = lowerR + (double)((double)j * ((upperR-lowerR)/(double)height));
repeats = 0;
lengthsq = 0.0;
while(repeats < 100000 && lengthsq < 4.0) { /* Theorem : If c belongs to M, then |Zn| <= 2. So Zn^2 <= 4 */
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
repeats++;
}
if(dis == 1){
#pragma omp critical
{
XSetForeground (display, gc, 1024 * 1024 * (repeats % 256));
XDrawPoint (display, window, gc, i, j);
}
}
}
}
}
if(dis == 1)XFlush(display);
}else{
// --------------display-------------
GC gc;
Display *display;
Window window;
if(rank==0){
if(dis){
//Display *display;
//Window window; //initialization for a window
int screen; //which screen
/* open connection with the server */
display = XOpenDisplay(NULL);
if(display == NULL) {
fprintf(stderr, "cannot open display\n");
return 0;
}
screen = DefaultScreen(display);
/* set window size */
//int width = 800;
//int height = 800;
/* set window position */
int x = 0;
int y = 0;
/* border width in pixels */
int border_width = 0;
/* create window */
window = XCreateSimpleWindow(display, RootWindow(display, screen), x, y, width, height, border_width,
BlackPixel(display, screen), WhitePixel(display, screen));
/* create graph */
//GC gc;
XGCValues values;
long valuemask = 0;
gc = XCreateGC(display, window, valuemask, &values);
//XSetBackground (display, gc, WhitePixel (display, screen));
XSetForeground (display, gc, BlackPixel (display, screen));
XSetBackground(display, gc, 0X0000FF00);
XSetLineAttributes (display, gc, 1, LineSolid, CapRound, JoinRound);
/* map(show) the window */
XMapWindow(display, window);
XSync(display, 0);
} // display-----------------------------------------
/* draw points */
//Compl z, c;
//int repeats;
//double temp, lengthsq;
int i;
int count = 0;
int *slave = malloc((width+1)*sizeof(int));
num_r = 0;
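			// Master: hand out one row index at a time (data_tag), collect back
			// [row, width iteration counts] (result_tag) and draw it, then either
			// send the next row or a terminate_tag once every row has been issued.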
//send chunk = 1
for(i=0;i<size-1;i++){
MPI_Send(&num_r,1,MPI_INT,i+1,data_tag,MPI_COMM_WORLD);
count++;
num_r++;
}
while(count >0){
MPI_Recv(slave,width+1,MPI_INT,MPI_ANY_SOURCE,result_tag,MPI_COMM_WORLD,&status);
count--;
if(dis == 1){ // ---------draw row
for(i=0; i<width; i++){
XSetForeground (display, gc, 1024 * 1024 * (slave[i+1] % 256));
XDrawPoint (display, window, gc, i, *slave);
}
}
if(num_r<height){
MPI_Send(&num_r,1,MPI_INT,status.MPI_SOURCE,data_tag,MPI_COMM_WORLD);//CHUNK=1
count++;
num_r++;
}else{
MPI_Send(&num_r,1,MPI_INT,status.MPI_SOURCE,terminate_tag,MPI_COMM_WORLD);//CHUNK=1
}
}
if(dis == 1)XFlush(display);
t_win = omp_get_wtime()-t_win;
}else{
Compl z,c;
int repeats;
double temp, lengthsq;
int i, j;
int *Pi = malloc((width+1)*sizeof(int));
MPI_Recv(&num_r,1,MPI_INT,0,MPI_ANY_TAG,MPI_COMM_WORLD,&status); //
t_win = omp_get_wtime()-t_win;
while(status.MPI_TAG == data_tag){
*Pi = num_r; //Pi[0] save this cpu do number row
//c.imag = lowerR + (double)((double)num_r * ((upperR-lowerR)/(double)height)); // imag is y axis
//printf(" %d ",num_r);
omp_set_num_threads(number_thread);
#pragma omp parallel num_threads(number_thread) default(shared) private(i,z, c, repeats, lengthsq, temp)
{ t_start[omp_get_thread_num()] = 0;
#pragma omp for schedule(dynamic)
for(i=0; i<width; i++) {
t_start[omp_get_thread_num()] = omp_get_wtime();
// for(j=0; j<height; j++) {
q[omp_get_thread_num()]++;
z.real = 0.0;
z.imag = 0.0;
c.real = leftR + (double)((double)i *((rightR-leftR)/(double)width)); /* Theorem : If c belongs to M(Mandelbrot set), then |c| <= 2 */
c.imag = lowerR + (double)((double)num_r * ((upperR-lowerR)/(double)height)); // if this statement is out of for , will be fault
repeats = 0;
lengthsq = 0.0;
while(repeats < 100000 && lengthsq < 4.0) { /* Theorem : If c belongs to M, then |Zn| <= 2. So Zn^2 <= 4 */
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
repeats++;
}
Pi[i+1] = repeats;
// XSetForeground (display, gc, 1024 * 1024 * (repeats % 256));
// XDrawPoint (display, window, gc, i, j);
total[omp_get_thread_num()]+=(omp_get_wtime()-t_start[omp_get_thread_num()]);
}
}
#pragma omp critical
{
MPI_Send(Pi,width+1,MPI_INT,0,result_tag,MPI_COMM_WORLD);
MPI_Recv(&num_r,1,MPI_INT,0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
}
}
}
}
int i;
for(i=0;i<number_thread;i++){
printf("%d %d\n",i+rank*number_thread,q[i]);
}
//printf("%d %d\n",rank,q[rank]);
if(rank == 0){
gettimeofday(&tv2, NULL);
t += (double)(tv2.tv_sec - tv1.tv_sec)+(double)(tv2.tv_usec - tv1.tv_usec)/1000000.0;
//printf("hyd=%lf\n", t);
}
// XFlush(display);
sleep(5);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
fitness.h | void calFit(Chromo *population, int N, int inicio, int fin)
{
int errores;
int k, i, j;
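    // Count pairwise diagonal conflicts per chromosome; assuming the usual
    // N-queens encoding where config[i] is the row of the queen in column i,
    // a lower fitness means fewer attacking pairs. The orphaned '#pragma omp for'
    // implies this is meant to run inside an enclosing omp parallel region.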
#pragma omp for
for (k = inicio; k < fin; k++)
{
errores = 0;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
if (i != j)
{
if ((population[k].config[i] - i) == (population[k].config[j] - j))
{
errores++;
}
if ((population[k].config[i] + i) == (population[k].config[j] + j))
{
errores++;
}
}
}
}
population[k].fitness = errores;
}
}
void copyBest(Chromo *Best, Chromo local, int N)
{
int i;
#pragma omp for
for (i = 0; i < N; i++)
{
Best->config[i] = local.config[i];
}
Best->fitness = local.fitness;
}
|
GB_binop__first_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_bool)
// A*D function (colscale): GB (_AxD__first_bool)
// D*A function (rowscale): GB (_DxB__first_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__first_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__first_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// A pattern? 0
// B type: bool
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_bool)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
bool alpha_scalar ;
bool beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_binop__min_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int64)
// A*D function (colscale): GB (_AxD__min_int64)
// D*A function (rowscale): GB (_DxB__min_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__min_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__min_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int64)
// C=scalar+B GB (_bind1st__min_int64)
// C=scalar+B' GB (_bind1st_tran__min_int64)
// C=A+scalar GB (_bind2nd__min_int64)
// C=A'+scalar GB (_bind2nd_tran__min_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT64 || GxB_NO_MIN_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__min_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__min_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__min_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__min_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
type* output,
const int ldin,
const int M,
const int K,
bool pack_k) {
const int m_round = (M + 3) / 4 * 4;
int k_round = (K + 3) / 4 * 4;
if (!pack_k) {
k_round = K;
}
const int m_loop = m_round / 4;
type* zero_buf = new type[K];
memset(zero_buf, 0, K * sizeof(type));
for (int i = 0; i < m_loop; ++i) {
const type* in0 = input + i * 4 * ldin;
const type* in1 = in0 + ldin;
const type* in2 = in1 + ldin;
const type* in3 = in2 + ldin;
if (4 * (i + 1) - M > 0) {
switch (4 * (i + 1) - M) {
case 3:
in1 = zero_buf;
case 2:
in2 = zero_buf;
case 1:
in3 = zero_buf;
default:
break;
}
}
for (int j = 0; j < K; ++j) {
*output++ = *in0++;
*output++ = *in1++;
*output++ = *in2++;
*output++ = *in3++;
}
for (int j = K; j < k_round; ++j) {
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
}
}
delete[] zero_buf;
}
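// Illustrative note (not part of the original file): with M = 2, K = 3 and
// pack_k = true, the 2x3 row-major input
//   a00 a01 a02
//   a10 a11 a12
// is padded to a 4x4 block and written out column by column in groups of 4:
//   a00 a10 0 0 | a01 a11 0 0 | a02 a12 0 0 | 0 0 0 0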
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
type* output,
const int ldin,
const int M,
const int K,
bool pack_k) {
const int m_round = (M + 7) / 8 * 8;
int k_round = (K + 7) / 8 * 8;
if (!pack_k) {
k_round = K;
}
const int m_loop = m_round / 8;
  type* zero_buf = new type[K];
memset(zero_buf, 0, K * sizeof(type));
for (int i = 0; i < m_loop; ++i) {
const type* in0 = input + i * 8 * ldin;
const type* in1 = in0 + ldin;
const type* in2 = in1 + ldin;
const type* in3 = in2 + ldin;
const type* in4 = in3 + ldin;
const type* in5 = in4 + ldin;
const type* in6 = in5 + ldin;
const type* in7 = in6 + ldin;
if (8 * (i + 1) - M > 0) {
switch (8 * (i + 1) - M) {
case 7:
in1 = zero_buf;
case 6:
in2 = zero_buf;
case 5:
in3 = zero_buf;
case 4:
in4 = zero_buf;
case 3:
in5 = zero_buf;
case 2:
in6 = zero_buf;
case 1:
in7 = zero_buf;
default:
break;
}
}
for (int j = 0; j < K; ++j) {
*output++ = *in0++;
*output++ = *in1++;
*output++ = *in2++;
*output++ = *in3++;
*output++ = *in4++;
*output++ = *in5++;
*output++ = *in6++;
*output++ = *in7++;
}
for (int j = K; j < k_round; ++j) {
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
}
  }
  delete[] zero_buf;
}
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
memset(tmp_c, 0, m * ldc * sizeof(type2));
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
if (flag_relu) {
tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
tmp_c[i * ldc + j] = tmp;
}
}
}
//! trans c to c4
basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
memset(tmp_c, 0, m * ldc * sizeof(type2));
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
if (flag_relu) {
tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
tmp_c[i * ldc + j] = tmp;
}
}
}
  //! trans c to c8
basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
if (flag_relu) {
c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
c[i * ldc + j] = tmp;
}
}
}
}
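// Illustrative usage sketch (not part of the original file): how basic_gemm
// above is called for a plain 2x2 * 2x2 row-major product with no bias and no
// relu. The function name is hypothetical and exists only to document the
// calling convention.
static inline void naive_math_example_basic_gemm() {
  float a[4] = {1.f, 2.f, 3.f, 4.f};  // A, row-major 2x2, lda = 2
  float b[4] = {5.f, 6.f, 7.f, 8.f};  // B, row-major 2x2, ldb = 2
  float c[4] = {0.f, 0.f, 0.f, 0.f};  // C, row-major 2x2, ldc = 2
  // C = 1 * A * B + 0 * C; expected result: {19, 22, 43, 50}
  basic_gemm<float, float>(false,    // trans_a
                           false,    // trans_b
                           2, 2, 2,  // m, n, k
                           1.f,      // alpha
                           a, 2,     // A, lda
                           b, 2,     // B, ldb
                           0.f,      // beta
                           c, 2,     // C, ldc
                           nullptr,  // bias
                           false,    // flag_bias
                           false);   // flag_relu
}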
template <typename type, typename type2>
static void basic_gemv(int m,
int k,
const type* a,
const type* b,
const type2* bias,
type2* c,
type2 alpha,
type2 beta,
bool trans_a = false,
bool flag_bias = false,
                       int flag_act = 0,
float six = 6.f,
float leakey_relu_alpha = 1.f) {
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
auto sum = static_cast<type2>(0);
for (int j = 0; j < k; ++j) {
type av;
if (trans_a) {
av = a[j * m + i];
} else {
av = a[i * k + j];
}
sum += av * b[j];
}
type2 tmp = alpha * sum + beta * c[i] + bias_data;
if (flag_act > 0) {
if (flag_act == 1) { // relu
c[i] = tmp > (type2)0 ? tmp : (type2)0;
} else if (flag_act == 2) { // relu 6
c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // clamp at six for relu6
} else if (flag_act == 4) { // leakey relu
c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
}
} else {
c[i] = tmp;
}
}
}
/**
* \brief basic direct convolution function
*/
//! for float, dtype1 and type2 is float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
int act_type,
float six = 6.f,
float scale = 1.f) {
Dtype2 beta = 0;
auto src_data = din;
auto dst_data_ref = dout;
auto weights_data = weights;
auto with_bias = flag_bias;
auto bias_data = bias;
int in_num = num;
int out_channels = chout;
int out_h = hout;
int out_w = wout;
int in_channel = chin;
int in_h = hin;
int in_w = win;
int out_c_group = out_channels / group;
int in_c_group = in_channel / group;
for (int n = 0; n < in_num; ++n) {
#pragma omp parallel for collapse(4)
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < out_h; ++oh) {
for (int ow = 0; ow < out_w; ++ow) {
int out_idx = n * group * out_c_group * out_h * out_w +
g * out_c_group * out_h * out_w + oc * out_h * out_w +
oh * out_w + ow;
Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
dst_data_ref[out_idx] = bias_d; // + dst_data_ref[out_idx] * beta;
for (int ic = 0; ic < in_c_group; ++ic) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
int iw = ow * stride_w - pad_w + kw * (dila_w);
int ih = oh * stride_h - pad_h + kh * (dila_h);
if (iw < 0 || iw >= in_w) continue;
if (ih < 0 || ih >= in_h) continue;
int iidx = n * in_channel * in_h * in_w +
g * in_c_group * in_h * in_w + ic * in_h * in_w +
ih * in_w + iw;
int widx =
g * out_c_group * in_c_group * kernel_h * kernel_w +
oc * in_c_group * kernel_h * kernel_w +
ic * kernel_h * kernel_w + kh * kernel_w + kw;
dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
}
}
}
if (act_type > 0) {
// 1-relu 2-relu6 4-leakyrelu
if (act_type == 1) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
} else if (act_type == 2) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
? dst_data_ref[out_idx]
: (Dtype2)six;
} else if (act_type == 4) {
dst_data_ref[out_idx] =
dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)(dst_data_ref[out_idx] * scale);
} else {
printf("this act type: %d does not support \n", act_type);
}
}
}
}
}
}
}
}
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
const Dtype* bias,
int channel,
int channel_size,
bool flag_bias,
bool flag_relu) {
Dtype* data = tensor;
for (int j = 0; j < channel; ++j) {
Dtype bias_c = flag_bias ? bias[j] : 0;
for (int i = 0; i < channel_size; i++) {
data[i] += bias_c;
if (flag_relu) {
data[i] = data[i] > 0 ? data[i] : 0.f;
}
}
data += channel_size;
}
}
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
for (int j = 0; j < size; ++j) {
tensor[j] = tensor[j] > 0 ? tensor[j] : (Dtype)0;
}
}
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
template <typename Dtype>
static void col2im(const Dtype* data_col,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h0,
const int pad_h1,
const int pad_w0,
const int pad_w1,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
Dtype* data_im) {
memset(data_im, 0, height * width * channels * sizeof(Dtype));
const int output_h =
(height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
stride_h +
1;
const int output_w =
(width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
1;
const int channel_size = height * width;
for (int channel = channels; channel--; data_im += channel_size) {
for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
int input_row = -pad_h0 + kernel_row * dilation_h;
for (int output_rows = output_h; output_rows; output_rows--) {
if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
data_col += output_w;
} else {
int input_col = -pad_w0 + kernel_col * dilation_w;
for (int output_col = output_w; output_col; output_col--) {
if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
data_im[input_row * width + input_col] += *data_col;
}
data_col++;
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
}
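// Note (illustrative summary, not from the original file): deconv_basic below
// implements transposed convolution as, per group, a GEMM of the transposed
// weights with the input (col_data = W^T * X via basic_gemm with trans_a=true),
// followed by col2im to scatter the columns back into the spatial output; bias
// (and relu, when a bias is present) is then applied by fill_bias_relu.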
//! for float, dtype1 and type2 is float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w0,
int pad_w1,
int pad_h0,
int pad_h1,
bool flag_bias,
bool flag_relu) {
int m = chout * kernel_w * kernel_h / group;
int n = hin * win;
int k = chin / group;
int group_size_in = win * hin * chin / group;
int group_size_coldata = m * n;
int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
(stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
(pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
(dila_h == 1);
Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
for (int i = 0; i < num; ++i) {
const Dtype1* din_batch = din + i * chin * hin * win;
Dtype2* dout_batch = dout + i * chout * hout * wout;
Dtype2* col_data = workspace_ptr;
if (flag_1x1s1p1) {
col_data = dout_batch;
}
memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
for (int g = 0; g < group; ++g) {
const Dtype1* din_group = din_batch + g * group_size_in;
const Dtype1* weights_group = weights + g * group_size_weights;
Dtype2* coldata_group = col_data + g * group_size_coldata;
basic_gemm<Dtype1, Dtype2>(true,
false,
m,
n,
k,
1,
weights_group,
m,
din_group,
n,
0,
coldata_group,
n,
nullptr,
false,
(!flag_bias && flag_relu));
}
if (!flag_1x1s1p1) {
col2im(col_data,
chout,
hout,
wout,
kernel_h,
kernel_w,
pad_h0,
pad_h1,
pad_w0,
pad_w1,
stride_h,
stride_w,
dila_h,
dila_w,
dout_batch);
}
//! add bias
if (flag_bias) {
fill_bias_relu(
dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
}
}
free(workspace_ptr);
}
static float deformable_bilinear(const float* bottom_data,
const int data_width,
const int height,
const int width,
float h,
float w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = static_cast<float>(h_low);
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = static_cast<float>(w_low);
} else {
w_high = w_low + 1;
}
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh;
float hw = 1 - lw;
float v1 = bottom_data[h_low * data_width + w_low];
float v2 = bottom_data[h_low * data_width + w_high];
float v3 = bottom_data[h_high * data_width + w_low];
float v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw;
float w2 = hh * lw;
float w3 = lh * hw;
float w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
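// Worked example (illustrative): for h = 1.25, w = 2.5 away from the borders,
// h_low = 1, w_low = 2, lh = 0.25, lw = 0.5, so the corner weights are
// w1 = 0.375, w2 = 0.375, w3 = 0.125, w4 = 0.125 (they always sum to 1) and
// the result is the correspondingly weighted average of the four neighbors.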
//! for float, dtype1 and type2 is float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
const float* offset_data,
const float* mask_data,
Dtype2* out_data,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
bool flag_relu,
bool modulated) {
int out_c_group = chout / group;
int in_c_group = chin / group;
int in_size = hin * win;
int out_size = hout * wout;
int c_in_size = chin * in_size;
int c_out_size = chout * out_size;
int kernel_size = kernel_w * kernel_h;
for (int n = 0; n < num; n++) {
#pragma omp parallel for collapse(4)
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < hout; oh++) {
for (int ow = 0; ow < wout; ow++) {
int out_idx = n * c_out_size + g * out_c_group * out_size +
oc * out_size + oh * wout + ow;
Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
out_data[out_idx] = bias_d + out_data[out_idx];
for (int ic = 0; ic < in_c_group; ++ic) {
for (int fh = 0; fh < kernel_h; fh++) {
for (int fw = 0; fw < kernel_w; fw++) {
const float* offset_data_ptr =
offset_data + n * group * 2 * kernel_size * out_size +
g * 2 * kernel_size * out_size;
const int data_offset_h_ptr =
((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
const int data_offset_w_ptr =
((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
const float offset_h = offset_data_ptr[data_offset_h_ptr];
const float offset_w = offset_data_ptr[data_offset_w_ptr];
const float iw =
ow * stride_w - pad_w + kernel_w * dila_w + offset_w;
const float ih =
oh * stride_h - pad_h + kernel_h * dila_h + offset_h;
if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
const float map_h = kernel_h * dila_h + offset_h;
const float map_w = kernel_w * dila_w + offset_w;
const int cur_height = hin - (oh * stride_h - pad_h);
const int cur_width = win - (ow * stride_w - pad_w);
const float* in_data_offset =
in_data + n * c_in_size +
(g * in_c_group + ic) * in_size +
(oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
float val = deformable_bilinear(in_data_offset,
win,
cur_height,
cur_width,
map_h,
map_w);
if (modulated) {
// use mask
const float* mask_ptr =
mask_data + n * group * kernel_size * out_size +
g * kernel_size * out_size +
(fh * kernel_w + fw) * hout * wout + oh * wout + ow;
val *= mask_ptr[0];
}
int widx = g * out_c_group * in_c_group * kernel_size +
oc * in_c_group * kernel_size +
ic * kernel_size + fh * kernel_w + fw;
out_data[out_idx] += val * weights[widx];
}
}
}
}
if (flag_relu) {
out_data[out_idx] = out_data[out_idx] > 0 ? out_data[out_idx] : 0;
}
}
}
}
}
}
}
|
net_md5_fmt_plug.c | /* Cracker for RIPv2 MD5 authentication hashes.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Added linkage to dynamic (type dynamic_39) for any salt 230 bytes or less,
 * by Jim Fougeron. Any salts > 230 bytes will still be handled by this full
* format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
* of salt. I think we might be able to get 239 bytes (due to a few issues).
* 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
* within dynamic. This is the FIRST format that is hybrid fat-thin.
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#ifndef DYNAMIC_DISABLED
#if FMT_EXTERNS_H
extern struct fmt_main fmt_netmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netmd5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include "formats.h"
#include "dynamic.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "net-md5"
#define FORMAT_NAME "\"Keyed MD5\" RIPv2, OSPF, BGP, SNMPv2"
#define FORMAT_TAG "$netmd5$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
// RIPv2 truncates (or null pads) passwords to length 16
#define PLAINTEXT_LENGTH 16
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT_LEN 1500
static struct fmt_tests tests[] = {
/* RIPv2 MD5 authentication hashes */
{ "02020000ffff0003002c01145267d48d000000000000000000020000ac100100ffffff000000000000000001ffff0001$1e372a8a233c6556253a0909bc3dcce6", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d48f000000000000000000020000ac100100ffffff000000000000000001ffff0001$ed9f940c3276afcc06d15babe8a1b61b", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d490000000000000000000020000ac100100ffffff000000000000000001ffff0001$c9f7763f80fcfcc2bbbca073be1f5df7", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49a000000000000000000020000ac100200ffffff000000000000000001ffff0001$3f6a72deeda200806230298af0797997", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49b000000000000000000020000ac100200ffffff000000000000000001ffff0001$b69184bacccc752cadf78cac455bd0de", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49d000000000000000000020000ac100100ffffff000000000000000001ffff0001$6442669c577e7662188865a54c105d0e", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267e076000000000000000000020000ac100200ffffff000000000000000001ffff0001$4afe22cf1750d9af8775b25bcf9cfb8c", "abcdefghijklmnop"},
{FORMAT_TAG "02020000ffff0003002c01145267e077000000000000000000020000ac100200ffffff000000000000000001ffff0001$326b12f6da03048a655ea4d8f7e3e123", "abcdefghijklmnop"},
{FORMAT_TAG "02020000ffff0003002c01145267e2ab000000000000000000020000ac100100ffffff000000000000000001ffff0001$ad76c40e70383f6993f54b4ba6492a26", "abcdefghijklmnop"},
/* OSPFv2 MD5 authentication hashes */
{"$netmd5$0201002cac1001010000000000000002000001105267ff8fffffff00000a0201000000280000000000000000$445ecbb27272bd791a757a6c85856150", "abcdefghijklmnop"},
{FORMAT_TAG "0201002cac1001010000000000000002000001105267ff98ffffff00000a0201000000280000000000000000$d4c248b417b8cb1490e02c5e99eb0ad1", "abcdefghijklmnop"},
{FORMAT_TAG "0201002cac1001010000000000000002000001105267ffa2ffffff00000a0201000000280000000000000000$528d9bf98be8213482af7295307625bf", "abcdefghijklmnop"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static void get_ptr();
static void init(struct fmt_main *self);
static void done(void);
#define MAGIC 0xfe5dd5ef
static struct custom_salt {
uint32_t magic;
int length;
	unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK
} *cur_salt;
static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
static struct fmt_main *pDynamicFmt, *pNetMd5_Dyna;
/* this function converts a 'native' net-md5 signature string into a $dynamic_39$ syntax string */
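/* e.g. (illustrative, derived from the snprintf below): an input of the form
   "$netmd5$<salt_hex>$<md5_hex>" becomes "$dynamic_39$<md5_hex>$HEX$<salt_hex>" */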
static char *Convert(char *Buf, char *ciphertext)
{
char *cp, *cp2;
if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
return ciphertext;
cp = strchr(&ciphertext[2], '$');
if (!cp)
return "*";
cp2 = strchr(&cp[1], '$');
if (!cp2)
return "*";
snprintf(Buf, sizeof(Conv_Buf), "$dynamic_39$%s$HEX%*.*s", &cp2[1], (int)(cp2-cp), (int)(cp2-cp), cp);
return Buf;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q = NULL;
int len;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = strrchr(ciphertext, '$');
if (!q)
return 0;
q = q + 1;
if ((q - p - 1) > MAX_SALT_LEN * 2)
return 0;
len = strspn(q, HEXCHARS_lc);
if (len != BINARY_SIZE * 2 || len != strlen(q)) {
get_ptr();
return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
}
if (strspn(p, HEXCHARS_lc) != q - p - 1)
return 0;
return 1;
}
static void *get_salt(char *ciphertext)
{
static char *pBuf=NULL;
struct custom_salt *cs;
char *orig_ct = ciphertext;
int i, len;
if (!pBuf) pBuf = (char *)mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
cs = (struct custom_salt*) pBuf;
memset(cs, 0, sizeof(*cs));
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
len = (strrchr(ciphertext, '$') - ciphertext) / 2;
for (i = 0; i < len; i++)
cs->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
if (len < 230) {
// return our memset buffer (putting the dyna salt pointer into it).
		// This keeps the 'pre-cleaned salt()' warning from hitting this format.
//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
memcpy((char*)cs, pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
dyna_salt_seen=1;
return cs;
}
cs->magic = MAGIC;
cs->length = len;
return cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
// returns proper 16 bytes, so we do not need to copy into our buffer.
return pDynamicFmt->methods.binary(ciphertext);
p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
get_ptr();
if (cur_salt->magic != MAGIC) {
pDynamicFmt->methods.set_salt(salt);
}
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.crypt_all(pcount, salt);
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
MD5_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
MD5_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.cmp_all(binary, count);
}
for (; index < count; index++)
if (((uint32_t*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.cmp_one(binary, index);
}
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void netmd5_set_key(char *key, int index)
{
if (dyna_salt_seen)
pDynamicFmt->methods.set_key(key, index);
/* strncpy will pad with zeros, which is needed */
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
static char *get_key(int index)
{
return saved_key[index];
}
static char *prepare(char *fields[10], struct fmt_main *self) {
static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
char *hash = fields[1];
if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
get_ptr();
if (text_in_dynamic_format_already(pDynamicFmt, hash))
return hash;
sprintf(buf, "%s%s", FORMAT_TAG, hash);
return buf;
}
return hash;
}
struct fmt_main fmt_netmd5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
netmd5_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
static void get_ptr() {
if (!pDynamicFmt) {
char *Buf;
pNetMd5_Dyna = mem_alloc_tiny(sizeof(fmt_netmd5), 16);
memcpy(pNetMd5_Dyna, &fmt_netmd5, sizeof(fmt_netmd5));
pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 0);
fmt_netmd5.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
fmt_netmd5.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
Buf = mem_alloc_tiny(strlen(fmt_netmd5.params.algorithm_name) + 4 + strlen("dynamic_39") + 1, 1);
sprintf(Buf, "%s or %s", fmt_netmd5.params.algorithm_name, "dynamic_39");
fmt_netmd5.params.algorithm_name = Buf;
//pDynamicFmt->methods.init(pDynamicFmt);
}
}
static void init(struct fmt_main *self)
{
// We have to allocate our dyna_39 object first, because we get 'modified' min/max counts from there.
get_ptr();
if (self->private.initialized == 0) {
pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 1);
self->private.initialized = 1;
}
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
pDynamicFmt->methods.done();
}
#endif /* plugin stanza */
#endif /* DYNAMIC_DISABLED */
|
silly-sort.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main()
{
int i, j, n = 10000;
int nthreads = 2;
// Allocate input, output and position arrays
int *in = (int *)calloc(n, sizeof(int));
int *out = (int *)calloc(n, sizeof(int));
int **pos = (int **)calloc(nthreads, sizeof(int *));
// Initialize input array in the reverse order
for (i = 0; i < n; i++)
in[i] = n - i;
// Silly sort
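    // Rank-counting idea: pos[tid][j] counts how many of thread tid's assigned
    // elements in[i] satisfy in[i] >= in[j]; summed over threads, this gives each
    // element's rank from the largest, which fixes its final position below
    // (assumes distinct keys, as in the reverse-ordered input above).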
#pragma omp parallel num_threads(nthreads) private(i, j)
{
int tid = omp_get_thread_num();
pos[tid] = (int *)calloc(n, sizeof(int));
#pragma omp for schedule(dynamic)
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
if (in[i] >= in[j])
pos[tid][j]++;
}
// Move elements to final position
for (i = 0; i < n; i++)
{
int pos_final = 0;
for (j = 0; j < nthreads; j++)
pos_final += pos[j][i];
out[n - pos_final] = in[i];
}
// Check if answer is correct
for (i = 0; i < n; i++)
if (i + 1 != out[i])
{
printf("test failed");
exit(0);
}
printf("test passed");
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
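/*
  Reference note (illustrative, not from the original sources): Paeth's
  decomposition writes a rotation by angle t as three shears,

    R(t) = Sx(a) . Sy(b) . Sx(a),  with a = -tan(t/2),  b = sin(t),

  where Sx(a) maps (x,y) -> (x+a*y, y) and Sy(b) maps (x,y) -> (x, y+b*x)
  (signs and ordering depend on the axis conventions in use).  Because each
  shear only slides whole rows or columns, the rotation can be built from
  one-dimensional resampling passes, which is what the shear methods below
  implement.
*/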
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
%      const double x_shear,const double y_shear,
% const double width,const double height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
const double x_shear,const double y_shear,
const double width,const double height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The result will be auto-cropped if the artifact "deskew:auto-crop" is
% defined, while the amount the image is to be deskewed, in degrees, is also
% saved as the artifact "deskew:angle".
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
MatrixInfo
*swap;
register MatrixInfo
*p,
*q;
register ssize_t
x;
size_t
step;
p=source_matrixs;
q=destination_matrixs;
for (step=1; step < GetMatrixColumns(p); step*=2)
{
for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
element,
neighbor;
for (i=0; i < (ssize_t) step; i++)
{
for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
for ( ; y < (ssize_t) GetMatrixRows(p); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
}
}
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
{
register ssize_t
y;
size_t
sum;
sum=0;
for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
{
ssize_t
delta;
unsigned short
element,
neighbor;
if (GetMatrixElement(p,x,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
continue;
delta=(ssize_t) element-(ssize_t) neighbor;
sum+=delta*delta;
}
projection[GetMatrixColumns(p)+sign*x-1]=sum;
}
}
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MatrixInfo
*destination_matrixs,
*source_matrixs;
MagickBooleanType
status;
size_t
count,
width;
ssize_t
j,
y;
unsigned char
c;
unsigned short
bits[256];
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
exception);
destination_matrixs=AcquireMatrixInfo(width,image->rows,
sizeof(unsigned short),exception);
if ((source_matrixs == (MatrixInfo *) NULL) ||
(destination_matrixs == (MatrixInfo *) NULL))
{
if (destination_matrixs != (MatrixInfo *) NULL)
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
if (source_matrixs != (MatrixInfo *) NULL)
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
if (NullMatrix(source_matrixs) == MagickFalse)
{
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
for (j=0; j < 256; j++)
{
c=(unsigned char) j;
for (count=0; c != 0; c>>=1)
count+=c & 0x01;
bits[j]=(unsigned short) count;
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
(void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
image_view=DestroyCacheView(image_view);
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickTrue);
}
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
PixelInfo
background;
double
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetPixelInfo(image,&background);
count=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(image,p);
background.green+=QuantumScale*GetPixelGreen(image,p);
background.blue+=QuantumScale*GetPixelBlue(image,p);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
background.alpha+=QuantumScale*GetPixelAlpha(image,p);
count++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
image->background_color.red=(double) ClampToQuantum(QuantumRange*
background.red/count);
image->background_color.green=(double) ClampToQuantum(QuantumRange*
background.green/count);
image->background_color.blue=(double) ClampToQuantum(QuantumRange*
background.blue/count);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
background.alpha/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" Deskew angle: %g",degrees);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
{
char
angle[MagickPathExtent];
(void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
(void) SetImageArtifact(clone_image,"deskew:angle",angle);
}
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
exception);
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (IsStringTrue(artifact) == MagickFalse)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image by an integral multiple of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
if (rotations == 0)
return(CloneImage(image,0,0,MagickTrue,exception));
if ((rotations == 1) || (rotations == 3))
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
else
rotate_image=CloneImage(image,0,0,MagickTrue,
exception);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
switch (rotations)
{
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels-=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
register ssize_t
y;
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(rotate_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(rotate_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels+=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
default:
break;
}
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
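/*
  Worked example of the displacement split used below (illustrative only):
  with degrees=0.3 and a row for which (y-height/2.0) is 7.5, the displacement
  is 2.25; step=floor(2.25)=2 and area=0.25, then step is incremented to 3.
  Each row is therefore shifted by 'step' whole pixels, and the fractional
  part 'area' is the weight CompositePixelInfoAreaBlend() uses to blend
  adjacent source pixels at the seam.
*/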
static MagickBooleanType XShearImage(Image *image,const double degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"
typedef enum
{
LEFT,
RIGHT
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
background;
ssize_t
y;
/*
X shear image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
background=image->background_color;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelInfo
pixel,
source,
destination;
double
area,
displacement;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
ssize_t
step;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=x_offset*GetPixelChannels(image);
displacement=degrees*(double) (y-height/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=RIGHT;
else
{
displacement*=(-1.0);
direction=LEFT;
}
step=(ssize_t) floor((double) displacement);
area=(double) (displacement-step);
step++;
pixel=background;
GetPixelInfo(image,&source);
GetPixelInfo(image,&destination);
switch (direction)
{
case LEFT:
{
/*
Transfer pixels left-to-right.
*/
if (step > x_offset)
break;
q=p-step*GetPixelChannels(image);
for (i=0; i < (ssize_t) width; i++)
{
if ((x_offset+i) < step)
{
p+=GetPixelChannels(image);
GetPixelInfoPixel(image,p,&pixel);
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
q+=GetPixelChannels(image);
for (i=0; i < (step-1); i++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
break;
}
case RIGHT:
{
/*
Transfer pixels right-to-left.
*/
p+=width*GetPixelChannels(image);
q=p+step*GetPixelChannels(image);
for (i=0; i < (ssize_t) width; i++)
{
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
if ((size_t) (x_offset+width+step-i) > image->columns)
continue;
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&destination,q);
for (i=0; i < (step-1); i++)
{
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&background,q);
}
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,XShearImageTag,progress,height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage() shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
const size_t width,const size_t height,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"
typedef enum
{
UP,
DOWN
} ShearDirection;
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
background;
ssize_t
x;
/*
Y Shear image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
background=image->background_color;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,width,1)
#endif
for (x=0; x < (ssize_t) width; x++)
{
ssize_t
step;
double
area,
displacement;
PixelInfo
pixel,
source,
destination;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
ShearDirection
direction;
if (status == MagickFalse)
continue;
p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
exception);
if (p == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=y_offset*GetPixelChannels(image);
displacement=degrees*(double) (x-width/2.0);
if (displacement == 0.0)
continue;
if (displacement > 0.0)
direction=DOWN;
else
{
displacement*=(-1.0);
direction=UP;
}
step=(ssize_t) floor((double) displacement);
area=(double) (displacement-step);
step++;
pixel=background;
GetPixelInfo(image,&source);
GetPixelInfo(image,&destination);
switch (direction)
{
case UP:
{
/*
Transfer pixels top-to-bottom.
*/
if (step > y_offset)
break;
q=p-step*GetPixelChannels(image);
for (i=0; i < (ssize_t) height; i++)
{
if ((y_offset+i) < step)
{
p+=GetPixelChannels(image);
GetPixelInfoPixel(image,p,&pixel);
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,
&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
SetPixelViaPixelInfo(image,&destination,q);
q+=GetPixelChannels(image);
for (i=0; i < (step-1); i++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
break;
}
case DOWN:
{
/*
Transfer pixels bottom-to-top.
*/
p+=height*GetPixelChannels(image);
q=p+step*GetPixelChannels(image);
for (i=0; i < (ssize_t) height; i++)
{
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
if ((size_t) (y_offset+height+step-i) > image->rows)
continue;
GetPixelInfoPixel(image,p,&source);
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&source,(double) GetPixelAlpha(image,p),area,
&destination);
SetPixelViaPixelInfo(image,&destination,q);
GetPixelInfoPixel(image,p,&pixel);
}
CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
&background,(double) background.alpha,area,&destination);
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&destination,q);
for (i=0; i < (step-1); i++)
{
q-=GetPixelChannels(image);
SetPixelViaPixelInfo(image,&background,q);
}
break;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a sheared copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
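/*
  Usage sketch (illustrative): shear a loaded image 10 degrees along X and
  5 degrees along Y.

    Image
      *sheared;

    sheared=ShearImage(image,10.0,5.0,exception);
*/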
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute image size.
*/
bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
image->rows)/2.0-0.5);
/*
Surround image with border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image.
*/
if (shear_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
(shear_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
shear_image->alpha_trait=image->alpha_trait;
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
if (status == MagickFalse)
shear_image=DestroyImage(shear_image);
return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
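/*
  Background sketch: ShearRotateImage() relies on Paeth's three-shear
  decomposition of a rotation by angle a,

    R(a) = Sx(-tan(a/2)) . Sy(sin(a)) . Sx(-tan(a/2)),

  where Sx and Sy denote shears along the X and Y axes.  The code below
  computes shear.x=-tan(a/2) and shear.y=sin(a) for the residual angle
  (after the nearest multiple of 90 degrees is handled by
  IntegralRotateImage) and applies XShearImage, YShearImage, and XShearImage
  in that order.
*/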
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
size_t
height,
rotations,
shear_width,
width;
/*
Adjust rotation angle.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
if (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
/*
Calculate shear equations.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute maximum bounds for 3 shear operations.
*/
width=integral_image->columns;
height=integral_image->rows;
bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
bounds.width+0.5);
bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
bounds.width-shear_width+2)/2.0+0.5);
bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
/*
Surround image with a border.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
rotate_image=BorderImage(integral_image,&border_info,image->compose,
exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image.
*/
status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
(rotate_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
(rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
bounds.height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
rotate_image->alpha_trait=image->alpha_trait;
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
|
DRB038-truedepseconddimension-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized in this program.
Data race pair: b[i][j]@65:7 vs. b[i][j-1]@65:15
*/
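/*
  For reference only (not part of the benchmark): the nest is race free when
  the worksharing construct is moved to the outermost loop, because the
  j-carried dependence b[i][j] <- b[i][j-1] then stays within one thread:

    #pragma omp parallel for private(j)
    for (i=0;i<n;i++)
      for (j=1;j<m;j++)
        b[i][j]=b[i][j-1];
*/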
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,j;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int n=len, m=len;
double b[n][m];
for (i=0;i<n;i++)
#pragma omp parallel for
for (j=1;j<m;j++)
b[i][j]=b[i][j-1];
return 0;
}
|
GB_unop__one_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__one_int16_int16)
// op(A') function: GB (_unop_tran__one_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__one_int16_int16)
(
int16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = 1 ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = 1 ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__one_int16_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
KadabraBetweenness.h | /*
* KadabraBetweenness.h
*
* Created on: 18.07.2018
* Author: Eugenio Angriman, Alexander van der Grinten
*/
#ifndef KADABRA_H_
#define KADABRA_H_
#include <atomic>
#include "../auxiliary/SortedList.h"
#include "../base/Algorithm.h"
#include "../components/ConnectedComponents.h"
#include "../graph/Graph.h"
namespace NetworKit {
class Status {
public:
Status(const count k);
const count k;
std::vector<node> top;
std::vector<double> approxTop;
std::vector<bool> finished;
std::vector<double> bet;
std::vector<double> errL;
std::vector<double> errU;
count nPairs;
};
class SpSampler {
public:
SpSampler(const Graph &G, const ConnectedComponents &cc);
std::vector<node> randomPath();
private:
const Graph &G;
const count n;
Graph pred;
std::vector<count> ballInd;
std::vector<count> dist;
std::vector<count> nPaths;
std::vector<node> q;
const ConnectedComponents &cc;
inline node randomNode() const;
void backtrackPath(const node u, const node v, const node start,
std::vector<node> &path);
void removeAllEdges(const count endQ);
count getDegree(const Graph &graph, node y, bool useDegreeIn);
};
/**
* @ingroup centrality
* Approximation of the betweenness centrality and computation of the top-k
* nodes with highest betweenness centrality according to the algorithm
* described in Borassi M. and Natale M. (2016): KADABRA is an ADaptive
* Algorithm for Betweenness via Random Approximation.
*/
class KadabraBetweenness : public Algorithm {
public:
/**
* If k = 0 the algorithm approximates the betweenness centrality of all
* vertices of the graph so that the scores are within an additive error @a
* err with probability at least (1 - @a delta). Otherwise, the algorithm
* computes the exact ranking of the top-k nodes with highest betweenness
* centrality.
* The algorithm relies on an adaptive random sampling technique of shortest
* paths; in the worst case the number of samples is w = (log(D - 2) +
* log(2/delta))/err^2, where D is the diameter of the graph.
* Thus, the worst-case running time is O(w * (|E| + |V|)), but the algorithm
* performs better in practice.
* NB: in order to work properly the Kadabra algorithm requires a random seed
* to be previously set with 'useThreadId' set to true.
*
* @param G the graph
* @param err maximum additive error guaranteed when approximating the
* betweenness centrality of all nodes.
* @param delta probability that the values of the betweenness centrality are
* within the error guarantee.
* @param k the number of top-k nodes to be computed. Set it to zero to
* approximate the betweenness centrality of all the nodes.
* @param unionSample, startFactor algorithm parameters that are automatically
* chosen.
*/
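/**
 * Usage sketch (illustrative only; assumes a Graph G and that the random
 * seed has been set beforehand, e.g. Aux::Random::setSeed(42, true)):
 *
 *   KadabraBetweenness kadabra(G, 0.01, 0.1); // err = 0.01, delta = 0.1
 *   kadabra.run();
 *   auto ranking = kadabra.ranking();         // (node, score) pairs
 */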
KadabraBetweenness(const Graph &G, const double err = 0.01,
const double delta = 0.1, const count k = 0,
count unionSample = 0, const count startFactor = 100);
/**
* Executes the Kadabra algorithm.
*/
void run() override;
/**
* @return The ranking of the nodes according to their approximated
* betweenness centrality.
*/
std::vector<std::pair<node, double>> ranking() const;
/**
* @return Nodes of the graph sorted by their approximated betweenness
* centrality.
*/
std::vector<node> topkNodesList() const {
checkHasRun();
return topkNodes;
}
/**
* @return Sorted list of approximated betweenness centrality scores.
*/
std::vector<double> topkScoresList() const {
checkHasRun();
return topkScores;
}
/**
* @return Approximated betweenness centrality score of all the nodes of the
* graph.
*/
std::vector<double> scores() const {
checkHasRun();
return approxSum;
}
/**
* @return Total number of samples.
*/
count getNumberOfIterations() const {
checkHasRun();
return nPairs;
}
/**
* @return Upper bound to the number of samples.
*/
double getOmega() const {
checkHasRun();
return omega;
}
protected:
const Graph &G;
const double delta, err;
const count k, n, startFactor;
count unionSample, omp_max_threads;
std::atomic<std::uint64_t> nPairs;
const bool absolute;
double deltaLMinGuess, deltaUMinGuess, omega;
std::vector<node> topkNodes;
std::vector<double> topkScores;
std::vector<std::pair<node, double>> rankingVector;
Aux::SortedList *top;
ConnectedComponents *cc;
std::vector<std::vector<double>> approx;
std::vector<double> approxSum;
std::vector<double> deltaLGuess;
std::vector<double> deltaUGuess;
const double balancingFactor = 0.001;
const unsigned short itersPerStep = 11;
void init();
void computeDeltaGuess();
void computeBetErr(Status *status, std::vector<double> &bet,
std::vector<double> &errL,
std::vector<double> &errU) const;
void oneRound(SpSampler &sampler);
bool computeFinished(Status *status) const;
void getStatus(Status *status, const bool parallel = false) const;
void computeApproxParallel(const bool normalize = false);
double computeF(const double btilde, const count iterNum,
const double deltaL) const;
double computeG(const double btilde, const count iterNum,
const double deltaU) const;
void fillResult();
void checkHasRun() const {
if (!hasRun) {
throw std::runtime_error("Call the run() method first.");
}
}
void fillPQ() {
for (count i = 0; i < n; ++i) {
top->insert(i, approxSum[i]);
}
}
};
inline std::vector<std::pair<node, double>>
KadabraBetweenness::ranking() const {
checkHasRun();
std::vector<std::pair<node, double>> result(topkNodes.size());
#pragma omp parallel for
for (omp_index i = 0; i < static_cast<omp_index>(result.size()); ++i) {
result[i] = std::make_pair(topkNodes[i], topkScores[i]);
}
return result;
}
} // namespace NetworKit
#endif /* ifndef KADABRA_H_ */
|
bml_import_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_allocate_ellpack.h"
#include "bml_import_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Convert a dense matrix into a bml matrix.
*
* \ingroup convert_group
*
* \param N The number of rows/columns
* \param matrix_precision The real precision
* \param A The dense matrix
* \return The bml matrix
*/
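/* Usage sketch (illustrative; the distribution mode value is an assumption):
 *
 *   REAL_T *dense = ...;  // N x N dense matrix, row-major
 *   bml_matrix_ellpack_t *A = TYPED_FUNC(bml_import_from_dense_ellpack) (
 *       dense_row_major, N, dense, 1.0e-12, M, sequential);
 */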
bml_matrix_ellpack_t *TYPED_FUNC(
bml_import_from_dense_ellpack) (
bml_dense_order_t order,
int N,
void *A,
double threshold,
int M,
bml_distribution_mode_t distrib_mode)
{
bml_matrix_ellpack_t *A_bml =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, distrib_mode);
int *A_index = A_bml->index;
int *A_nnz = A_bml->nnz;
REAL_T *dense_A = (REAL_T *) A;
REAL_T *A_value = A_bml->value;
#pragma omp parallel for shared(A_value, A_index, A_nnz, dense_A)
for (int i = 0; i < N; i++)
{
A_nnz[i] = 0;
for (int j = 0; j < N; j++)
{
REAL_T A_ij;
switch (order)
{
case dense_row_major:
A_ij = dense_A[ROWMAJOR(i, j, N, N)];
break;
case dense_column_major:
A_ij = dense_A[COLMAJOR(i, j, N, N)];
break;
default:
LOG_ERROR("unknown order\n");
break;
}
if (is_above_threshold(A_ij, threshold))
{
A_value[ROWMAJOR(i, A_nnz[i], N, M)] = A_ij;
A_index[ROWMAJOR(i, A_nnz[i], N, M)] = j;
A_nnz[i]++;
}
}
}
return A_bml;
}
|
multigrid_par.c | /**
* A parallel multigrid program for solving partial differential equations.
*
* Authors: Lucas Villarroel, Erik Hanstad
* Language: C using the OpenMP API
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define MAX_ITERATIONS 100000000
#define MAX_SIZE 10000
#define MAX_WORKERS 4
#define FEW_ITERATIONS 4
int numWorkers;
double maxDiff(double** grid, double** new, int size) {
int i, j;
double maxDiff = 0.0;
double myDiff = -1;
#pragma omp parallel for private(j, myDiff)
for (i = 1; i <= size; i++) {
for (j = 1; j <= size; j++) {
myDiff = fabs(grid[i][j] - new[i][j]);
if (myDiff > maxDiff) {
#pragma omp critical
{
if (myDiff > maxDiff)
maxDiff = myDiff;
}
}
}
}
return maxDiff;
}
void printGrid(double** grid, int size) {
FILE* fp = fopen("par-data.out", "w");
for (int i = 0; i < size; i++) {
fprintf(fp, "[");
for (int j = 0; j < size; j++) {
if (j == 0) {
fprintf(fp, "%.10f", grid[i][j]);
} else {
fprintf(fp, ", %.10f", grid[i][j]);
}
}
fprintf(fp, "]\n");
}
fclose(fp);
}
void jacobi(double** grid, double** new, int size, int numIters) {
int i, j;
for (int iter = 1; iter < numIters; iter++) {
#pragma omp parallel for private(j)
for (i = 1; i <= size; i++) {
for (j = 1; j <= size; j++) {
new[i][j] = (grid[i - 1][j] + grid[i + 1][j] + grid[i][j - 1] + grid[i][j + 1]) * 0.25;
}
}
#pragma omp parallel for private(j)
for (i = 1; i <= size; i++) {
for (j = 1; j <= size; j++) {
grid[i][j] = (new[i - 1][j] + new[i + 1][j] + new[i][j - 1] + new[i][j + 1]) * 0.25;
}
}
#ifdef debug
printf("iter %d/%d\n", iter, numIters);
#endif // debug
}
}
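// Full-weighting restriction: each interior coarse point gets 1/2 of the
// co-located fine point plus 1/8 of each of its four fine-grid neighbours.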
void restrictGrid(double** fine, double** coarse, int coarseSize) {
int x, y, i, j;
#pragma omp parallel for private(j, x, y)
for (i = 1; i <= coarseSize; i++) {
x = i * 2;
for (j = 1; j <= coarseSize; j++) {
y = j * 2;
coarse[i][j] = fine[x][y] * 0.5 + (fine[x - 1][y] + fine[x][y - 1] + fine[x][y + 1] + fine[x + 1][y]) * 0.125;
}
}
}
void interpolate(double** fine, double** coarse, int fineSize, int coarseSize) {
int x, y, i, j;
// assign coarse grid points to corresponding fine grid points
#pragma omp parallel for private(j, x, y)
for (i = 1; i <= coarseSize; i++) {
x = i * 2;
for (j = 1; j <= coarseSize; j++) {
y = j * 2;
fine[x][y] = coarse[i][j];
}
}
// update fine grid points in columns that were updated
#pragma omp parallel for private(j)
for (int i = 2; i <= fineSize; i += 2) {
for (int j = 1; j <= fineSize; j += 2) {
fine[i][j] = (fine[i - 1][j] + fine[i + 1][j]) * 0.5;
}
}
// update other fine grid points
#pragma omp parallel for private(j)
for (int i = 1; i <= fineSize; i++) {
for (int j = 2; j <= fineSize; j += 2) {
fine[i][j] = (fine[i][j - 1] + fine[i][j + 1]) * 0.5;
}
}
}
double** initGrid(int size) {
double** grid = malloc(size * sizeof(double*));
for (int i = 0; i < size; i++) {
grid[i] = malloc(size * sizeof(double));
}
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
if ((i == 0) || (j == 0) || (j == (size - 1)) || (i == (size - 1))) {
grid[i][j] = 1;
} else {
grid[i][j] = 0;
}
}
}
return grid;
}
int main(int argc, char* argv[]) {
// take command line args
int size1 = (argc > 1) ? ((atoi(argv[1]) < MAX_SIZE) ? atoi(argv[1]) : MAX_SIZE) : MAX_SIZE;
int numIters = (argc > 2) ? ((atoi(argv[2]) < MAX_ITERATIONS) ? atoi(argv[2]) : MAX_ITERATIONS) : MAX_ITERATIONS;
numWorkers = (argc > 3) ? ((atoi(argv[3]) < MAX_WORKERS) ? atoi(argv[3]) : MAX_WORKERS) : MAX_WORKERS;
int size2 = (size1 * 2) + 1;
int size3 = (size2 * 2) + 1;
int size4 = (size3 * 2) + 1;
// allocate memory for grids
double** grid1 = initGrid(size1 + 2);
double** new1 = initGrid(size1 + 2);
double** grid2 = initGrid(size2 + 2);
double** new2 = initGrid(size2 + 2);
double** grid3 = initGrid(size3 + 2);
double** new3 = initGrid(size3 + 2);
double** grid4 = initGrid(size4 + 2);
double** new4 = initGrid(size4 + 2);
omp_set_num_threads(numWorkers);
// start timer
double start_time = omp_get_wtime();
// restrict from finest grid down
jacobi(grid4, new4, size4, FEW_ITERATIONS);
restrictGrid(grid4, grid3, size3);
jacobi(grid3, new3, size3, FEW_ITERATIONS);
restrictGrid(grid3, grid2, size2);
jacobi(grid2, new2, size2, FEW_ITERATIONS);
restrictGrid(grid2, grid1, size1);
// coarsest grid reached, compute solution
jacobi(grid1, new1, size1, numIters);
// interpolate back to finest grid
interpolate(grid2, grid1, size2, size1);
jacobi(grid2, new2, size2, FEW_ITERATIONS);
interpolate(grid3, grid2, size3, size2);
jacobi(grid3, new3, size3, FEW_ITERATIONS);
interpolate(grid4, grid3, size4, size3);
jacobi(grid4, new4, size4, FEW_ITERATIONS);
// calculate max difference
double diff = maxDiff(grid4, new4, size4);
// stop timer
double end_time = omp_get_wtime();
int msec = (end_time - start_time) * 1000;
printf("Size: %d, Iterations: %d\n", size1, numIters);
printf("Execution time: %d ms\n", msec);
printf("Maximum error %.5f\n", diff);
printGrid(grid4, size4 + 2);
return 0;
} |
omp_reduce.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <complex.h>
#include "config.h"
void NPomp_dsum_reduce_inplace(double **vec, size_t count)
{
unsigned int nthreads = omp_get_num_threads();
unsigned int thread_id = omp_get_thread_num();
unsigned int bit, thread_src;
unsigned int mask = 0;
double *dst = vec[thread_id];
double *src;
size_t i;
#pragma omp barrier
for (bit = 0; (1<<bit) < nthreads; bit++) {
mask |= 1 << bit;
if (!(thread_id & mask)) {
thread_src = thread_id | (1<<bit);
if (thread_src < nthreads) {
src = vec[thread_src];
for (i = 0; i < count; i++) {
dst[i] += src[i];
}
}
}
#pragma omp barrier
}
}
void NPomp_dprod_reduce_inplace(double **vec, size_t count)
{
unsigned int nthreads = omp_get_num_threads();
unsigned int thread_id = omp_get_thread_num();
unsigned int bit, thread_src;
unsigned int mask = 0;
double *dst = vec[thread_id];
double *src;
size_t i;
#pragma omp barrier
for (bit = 0; (1<<bit) < nthreads; bit++) {
mask |= 1 << bit;
if (!(thread_id & mask)) {
thread_src = thread_id | (1<<bit);
if (thread_src < nthreads) {
src = vec[thread_src];
for (i = 0; i < count; i++) {
dst[i] *= src[i];
}
}
}
#pragma omp barrier
}
}
void NPomp_zsum_reduce_inplace(double complex **vec, size_t count)
{
unsigned int nthreads = omp_get_num_threads();
unsigned int thread_id = omp_get_thread_num();
unsigned int bit, thread_src;
unsigned int mask = 0;
double complex *dst = vec[thread_id];
double complex *src;
size_t i;
#pragma omp barrier
for (bit = 0; (1<<bit) < nthreads; bit++) {
mask |= 1 << bit;
if (!(thread_id & mask)) {
thread_src = thread_id | (1<<bit);
if (thread_src < nthreads) {
src = vec[thread_src];
for (i = 0; i < count; i++) {
dst[i] += src[i];
}
}
}
#pragma omp barrier
}
}
void NPomp_zprod_reduce_inplace(double complex **vec, size_t count)
{
unsigned int nthreads = omp_get_num_threads();
unsigned int thread_id = omp_get_thread_num();
unsigned int bit, thread_src;
unsigned int mask = 0;
double complex *dst = vec[thread_id];
double complex *src;
size_t i;
#pragma omp barrier
for (bit = 0; (1<<bit) < nthreads; bit++) {
mask |= 1 << bit;
if (!(thread_id & mask)) {
thread_src = thread_id | (1<<bit);
if (thread_src < nthreads) {
src = vec[thread_src];
for (i = 0; i < count; i++) {
dst[i] *= src[i];
}
}
}
#pragma omp barrier
}
}
#ifdef _OPENMP
int get_omp_threads() {
return omp_get_max_threads();
}
int set_omp_threads(int n) {
omp_set_num_threads(n);
return n;
}
#else
// mimic omp_get_max_threads omp_set_num_threads function of libgomp
int get_omp_threads() { return 1; }
int set_omp_threads(int n) { return 0; }
#endif
|
dropout_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include <random>
#include <string>
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
namespace paddle {
namespace operators {
// aligned vector generates vectorized load/store on CUDA
template <typename T, int Size>
struct alignas(sizeof(T) * Size) AlignedVector {
T val[Size];
};
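// Returns the vector width (4 or 1) usable for loads/stores through this
// pointer, based on whether its address is aligned to AlignedVector<T, 4>.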
template <typename T>
inline int VectorizedSize(const T* pointer) {
uint64_t address = reinterpret_cast<uint64_t>(pointer);
constexpr int vec4 = std::alignment_of<AlignedVector<T, 4>>::value; // NOLINT
if (address % vec4 == 0) {
return 4;
}
return 1;
}
#if defined(__NVCC__) || defined(__HIPCC__)
template <typename T, typename MaskType, int VecSize>
__global__ void DropoutGradCUDAKernel(const T* dout, const MaskType* mask,
const T factor, const int64_t size,
T* dx) {
int64_t idx = blockDim.x * blockIdx.x + threadIdx.x;
using LoadT = AlignedVector<T, VecSize>;
using MaskLoadT = AlignedVector<MaskType, VecSize>;
for (int i = idx * VecSize; i < size; i += blockDim.x * gridDim.x * VecSize) {
T dout_vec[VecSize];
LoadT* dout_value = reinterpret_cast<LoadT*>(&dout_vec);
*dout_value = *reinterpret_cast<const LoadT*>(&dout[i]);
MaskType mask_vec[VecSize];
MaskLoadT* mask_value = reinterpret_cast<MaskLoadT*>(&mask_vec);
*mask_value = *reinterpret_cast<const MaskLoadT*>(&mask[i]);
T dx_vec[VecSize];
#pragma unroll
for (int ii = 0; ii < VecSize; ii++) {
dx_vec[ii] = dout_vec[ii] * static_cast<T>(mask_vec[ii]) * factor;
}
*(reinterpret_cast<LoadT*>(&dx[i])) = *reinterpret_cast<LoadT*>(&dx_vec[0]);
}
}
#endif
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* seed =
context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
auto* y = context.Output<Tensor>("Out");
const auto* x_data = x->data<T>();
auto* y_data = y->mutable_data<T>(context.GetPlace());
float dropout_prob = context.Attr<float>("dropout_prob");
auto& dropout_implementation =
context.Attr<std::string>("dropout_implementation");
bool upscale_in_train = (dropout_implementation == "upscale_in_train");
if (!context.Attr<bool>("is_test")) {
auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
size_t size = framework::product(mask->dims());
// Special case when dropout_prob is 1.0
if (dropout_prob == 1.0f) {
std::memset(y_data, 0, size * sizeof(*y_data)); // NOLINT
std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT
return;
}
// std::minstd_rand engine;
// NOTE: fixed seed should only be used in unittest or for debug.
// Guarantee to use random seed in training.
int seed_data = 0;
if (seed) {
seed_data = *(seed->data<int>());
} else {
seed_data =
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
}
auto engine = framework::GetCPURandomEngine(seed_data);
std::uniform_real_distribution<float> dist(0, 1);
for (size_t i = 0; i < size; ++i) {
if (dist(*engine) < dropout_prob) {
mask_data[i] = 0;
y_data[i] = 0;
} else {
mask_data[i] = 1;
if (upscale_in_train) {
y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob);
} else {
y_data[i] = x_data[i];
}
}
}
} else {
if (upscale_in_train) {
const auto* X_data = x->data<T>();
auto* Y_data = y->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < x->numel(); i++) {
Y_data[i] = X_data[i];
}
} else {
auto X = EigenMatrix<T>::Reshape(*x, 1);
auto Y = EigenMatrix<T>::Reshape(*y, 1);
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
}
}
}
};
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(!context.Attr<bool>("is_test"), true,
platform::errors::PreconditionNotMet(
"GradOp is only callable when is_test is false"));
auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
auto* mask = context.Input<Tensor>("Mask");
grad_x->mutable_data<T>(context.GetPlace());
auto size = grad_x->numel();
auto M = EigenVector<uint8_t>::Flatten(*mask);
auto dX = EigenVector<T>::Flatten(*grad_x);
auto dY = EigenVector<T>::Flatten(*grad_y);
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto& dropout_implementation =
context.Attr<std::string>("dropout_implementation");
if (dropout_implementation == "upscale_in_train") {
float dropout_prob = context.Attr<float>("dropout_prob");
if (dropout_prob == 1.0f) {
dX.device(place) = static_cast<T>(0) * dY;
} else {
int vec_size = VectorizedSize<T>(grad_y->data<T>());
if (platform::is_gpu_place(context.GetPlace()) && vec_size == 4 &&
size % 4 == 0) {
#if defined(__NVCC__) || defined(__HIPCC__)
auto factor = static_cast<T>(1.0f / (1.0f - dropout_prob));
auto stream = context.cuda_device_context().stream();
platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(
context.cuda_device_context(), size);
DropoutGradCUDAKernel<
T, uint8_t,
4><<<config.block_per_grid, config.thread_per_block, 0, stream>>>(
grad_y->data<T>(), mask->data<uint8_t>(), factor, size,
grad_x->data<T>());
#endif
} else {
dX.device(place) =
dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob);
}
}
} else {
dX.device(place) = dY * M.cast<T>();
}
}
};
} // namespace operators
} // namespace paddle
|
taskloop_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd foo
void test_no_clause() {
int i;
#pragma omp taskloop simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp taskloop simd' must be a for loop}}
#pragma omp taskloop simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp taskloop simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp taskloop simd nogroup nogroup
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp taskloop simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp taskloop simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp taskloop simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp taskloop simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp taskloop simd simdlen(64) safelen(8)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp taskloop simd
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
declare_reduction_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp-simd -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in){{$}}
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: #pragma omp declare reduction (fun : float : omp_out += omp_in) initializer(omp_priv = omp_orig + 15)
// CHECK: struct SSS {
struct SSS {
int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: #pragma omp declare reduction (+ : int : omp_out *= omp_in)
// CHECK-NEXT: #pragma omp declare reduction (+ : char : omp_out *= omp_in)
};
// CHECK: };
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: int main() {
int main() {
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
{
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: #pragma omp declare reduction (fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
}
return 0;
}
// CHECK: }
#endif
|
image.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/image.c"
#else
#undef MAX
#define MAX(a,b) ( ((a)>(b)) ? (a) : (b) )
#undef MIN
#define MIN(a,b) ( ((a)<(b)) ? (a) : (b) )
#undef TAPI
#define TAPI __declspec(dllimport)
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#undef temp_t
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define temp_t real
#else
#define temp_t float
#endif
static inline real image_(FromIntermediate)(temp_t x) {
#ifdef TH_REAL_IS_BYTE
x += 0.5;
if( x <= 0 ) return 0;
if( x >= 255 ) return 255;
#endif
return x;
}
static void image_(Main_op_validate)( lua_State *L, THTensor *Tsrc, THTensor *Tdst){
long src_depth = 1;
long dst_depth = 1;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
if(Tdst->nDimension == 3) dst_depth = Tdst->size[0];
if(Tsrc->nDimension == 3) src_depth = Tsrc->size[0];
if( (Tdst->nDimension==3 && ( src_depth!=dst_depth)) ||
(Tdst->nDimension!=Tsrc->nDimension) )
luaL_error(L, "image.scale: src and dst depths do not match");
if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.scale: src and dst depths do not match");
}
static long image_(Main_op_stride)( THTensor *T,int i){
if (T->nDimension == 2) {
if (i == 0) return 0;
else return T->stride[i-1];
}
return T->stride[i];
}
static long image_(Main_op_depth)( THTensor *T){
if(T->nDimension == 3) return T->size[0]; /* rgb or rgba */
return 1; /* greyscale */
}
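/*
 * Resamples a single row or column (a 1-D strided slice) linearly.
 * Upsampling interpolates between the two nearest source samples;
 * downsampling averages all source samples covered by each destination
 * sample, weighting the partial samples at either end by their coverage
 * (effectively a box filter). Equal lengths reduce to a plain copy.
 */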
static void image_(Main_scaleLinear_rowcol)(THTensor *Tsrc,
THTensor *Tdst,
long src_start,
long dst_start,
long src_stride,
long dst_stride,
long src_len,
long dst_len ) {
real *src= THTensor_(data)(Tsrc);
real *dst= THTensor_(data)(Tdst);
if ( dst_len > src_len ){
long di;
float si_f;
long si_i;
float scale = (float)(src_len - 1) / (dst_len - 1);
if ( src_len == 1 ) {
for( di = 0; di < dst_len - 1; di++ ) {
long dst_pos = dst_start + di*dst_stride;
dst[dst_pos] = src[ src_start ];
}
} else {
for( di = 0; di < dst_len - 1; di++ ) {
long dst_pos = dst_start + di*dst_stride;
si_f = di * scale; si_i = (long)si_f; si_f -= si_i;
dst[dst_pos] = image_(FromIntermediate)(
(1 - si_f) * src[ src_start + si_i * src_stride ] +
si_f * src[ src_start + (si_i + 1) * src_stride ]);
}
}
dst[ dst_start + (dst_len - 1) * dst_stride ] =
src[ src_start + (src_len - 1) * src_stride ];
}
else if ( dst_len < src_len ) {
long di;
long si0_i = 0; float si0_f = 0;
long si1_i; float si1_f;
long si;
float scale = (float)src_len / dst_len;
float acc, n;
for( di = 0; di < dst_len; di++ )
{
si1_f = (di + 1) * scale; si1_i = (long)si1_f; si1_f -= si1_i;
acc = (1 - si0_f) * src[ src_start + si0_i * src_stride ];
n = 1 - si0_f;
for( si = si0_i + 1; si < si1_i; si++ )
{
acc += src[ src_start + si * src_stride ];
n += 1;
}
if( si1_i < src_len )
{
acc += si1_f * src[ src_start + si1_i*src_stride ];
n += si1_f;
}
dst[ dst_start + di*dst_stride ] = image_(FromIntermediate)(acc / n);
si0_i = si1_i; si0_f = si1_f;
}
}
else {
long i;
for( i = 0; i < dst_len; i++ )
dst[ dst_start + i*dst_stride ] = src[ src_start + i*src_stride ];
}
}
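/*
 * Cubic interpolation between p1 and p2 using the two outer neighbors
 * p0 and p3; the coefficients below are those of the Catmull-Rom spline,
 * so x = 0 returns p1 and x = 1 returns p2 exactly.
 */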
static inline temp_t image_(Main_cubicInterpolate)(temp_t p0,
temp_t p1,
temp_t p2,
temp_t p3,
temp_t x) {
temp_t a0 = p1;
temp_t a1 = p2 - p0;
temp_t a2 = 2 * p0 - 5 * p1 + 4 * p2 - p3;
temp_t a3 = 3 * (p1 - p2) + p3 - p0;
return a0 + 0.5 * x * (a1 + x * (a2 + x * a3));
}
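/*
 * Resamples a single row or column with the cubic kernel above.
 * Missing neighbors at the borders are extrapolated linearly
 * (p0 = 2*p1 - p2 on the left, p3 = 2*p2 - p1 on the right).
 */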
static void image_(Main_scaleCubic_rowcol)(THTensor *Tsrc,
THTensor *Tdst,
long src_start,
long dst_start,
long src_stride,
long dst_stride,
long src_len,
long dst_len ) {
real *src= THTensor_(data)(Tsrc);
real *dst= THTensor_(data)(Tdst);
if ( dst_len == src_len ){
long i;
for( i = 0; i < dst_len; i++ )
dst[ dst_start + i*dst_stride ] = src[ src_start + i*src_stride ];
} else if ( src_len == 1 ) {
long i;
for( i = 0; i < dst_len - 1; i++ ) {
long dst_pos = dst_start + i*dst_stride;
dst[dst_pos] = src[ src_start ];
}
} else {
long di;
float si_f;
long si_i;
float scale;
if (dst_len == 1)
scale = (float)(src_len - 1);
else
scale = (float)(src_len - 1) / (dst_len - 1);
for( di = 0; di < dst_len - 1; di++ ) {
long dst_pos = dst_start + di*dst_stride;
si_f = di * scale; si_i = (long)si_f; si_f -= si_i;
temp_t p0;
temp_t p1 = src[ src_start + si_i * src_stride ];
temp_t p2 = src[ src_start + (si_i + 1) * src_stride ];
temp_t p3;
if (si_i > 0) {
p0 = src[ src_start + (si_i - 1) * src_stride ];
} else {
p0 = 2 * p1 - p2;
}
if (si_i + 2 < src_len) {
p3 = src[ src_start + (si_i + 2) * src_stride ];
} else {
p3 = 2 * p2 - p1;
}
temp_t value = image_(Main_cubicInterpolate)(p0, p1, p2, p3, si_f);
dst[dst_pos] = image_(FromIntermediate)(value);
}
dst[ dst_start + (dst_len - 1) * dst_stride ] =
src[ src_start + (src_len - 1) * src_stride ];
}
}
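/*
 * Bilinear rescale, implemented as two separable 1-D passes: every source
 * row is resampled into a temporary (src_height x dst_width) buffer, then
 * every column of that buffer is resampled to the destination height.
 */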
static int image_(Main_scaleBilinear)(lua_State *L) {
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
THTensor *Ttmp;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height;
long src_stride0, src_stride1, src_stride2, src_width, src_height;
long tmp_stride0, tmp_stride1, tmp_stride2, tmp_width, tmp_height;
long i, j, k;
image_(Main_op_validate)(L, Tsrc,Tdst);
int ndims;
if (Tdst->nDimension == 3) ndims = 3;
else ndims = 2;
Ttmp = THTensor_(newWithSize2d)(Tsrc->size[ndims-2], Tdst->size[ndims-1]);
dst_stride0= image_(Main_op_stride)(Tdst,0);
dst_stride1= image_(Main_op_stride)(Tdst,1);
dst_stride2= image_(Main_op_stride)(Tdst,2);
src_stride0= image_(Main_op_stride)(Tsrc,0);
src_stride1= image_(Main_op_stride)(Tsrc,1);
src_stride2= image_(Main_op_stride)(Tsrc,2);
tmp_stride0= image_(Main_op_stride)(Ttmp,0);
tmp_stride1= image_(Main_op_stride)(Ttmp,1);
tmp_stride2= image_(Main_op_stride)(Ttmp,2);
dst_width= Tdst->size[ndims-1];
dst_height= Tdst->size[ndims-2];
src_width= Tsrc->size[ndims-1];
src_height= Tsrc->size[ndims-2];
tmp_width= Ttmp->size[1];
tmp_height= Ttmp->size[0];
for(k=0;k<image_(Main_op_depth)(Tsrc);k++) {
/* compress/expand rows first */
for(j = 0; j < src_height; j++) {
image_(Main_scaleLinear_rowcol)(Tsrc,
Ttmp,
0*src_stride2+j*src_stride1+k*src_stride0,
0*tmp_stride2+j*tmp_stride1+k*tmp_stride0,
src_stride2,
tmp_stride2,
src_width,
tmp_width );
}
/* then columns */
for(i = 0; i < dst_width; i++) {
image_(Main_scaleLinear_rowcol)(Ttmp,
Tdst,
i*tmp_stride2+0*tmp_stride1+k*tmp_stride0,
i*dst_stride2+0*dst_stride1+k*dst_stride0,
tmp_stride1,
dst_stride1,
tmp_height,
dst_height );
}
}
THTensor_(free)(Ttmp);
return 0;
}
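/*
 * Bicubic rescale; same separable two-pass scheme as scaleBilinear, but
 * each row and column is resampled with the Catmull-Rom cubic kernel.
 */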
static int image_(Main_scaleBicubic)(lua_State *L) {
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
THTensor *Ttmp;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height;
long src_stride0, src_stride1, src_stride2, src_width, src_height;
long tmp_stride0, tmp_stride1, tmp_stride2, tmp_width, tmp_height;
long i, j, k;
image_(Main_op_validate)(L, Tsrc,Tdst);
int ndims;
if (Tdst->nDimension == 3) ndims = 3;
else ndims = 2;
Ttmp = THTensor_(newWithSize2d)(Tsrc->size[ndims-2], Tdst->size[ndims-1]);
dst_stride0= image_(Main_op_stride)(Tdst,0);
dst_stride1= image_(Main_op_stride)(Tdst,1);
dst_stride2= image_(Main_op_stride)(Tdst,2);
src_stride0= image_(Main_op_stride)(Tsrc,0);
src_stride1= image_(Main_op_stride)(Tsrc,1);
src_stride2= image_(Main_op_stride)(Tsrc,2);
tmp_stride0= image_(Main_op_stride)(Ttmp,0);
tmp_stride1= image_(Main_op_stride)(Ttmp,1);
tmp_stride2= image_(Main_op_stride)(Ttmp,2);
dst_width= Tdst->size[ndims-1];
dst_height= Tdst->size[ndims-2];
src_width= Tsrc->size[ndims-1];
src_height= Tsrc->size[ndims-2];
tmp_width= Ttmp->size[1];
tmp_height= Ttmp->size[0];
for(k=0;k<image_(Main_op_depth)(Tsrc);k++) {
/* compress/expand rows first */
for(j = 0; j < src_height; j++) {
image_(Main_scaleCubic_rowcol)(Tsrc,
Ttmp,
0*src_stride2+j*src_stride1+k*src_stride0,
0*tmp_stride2+j*tmp_stride1+k*tmp_stride0,
src_stride2,
tmp_stride2,
src_width,
tmp_width );
}
/* then columns */
for(i = 0; i < dst_width; i++) {
image_(Main_scaleCubic_rowcol)(Ttmp,
Tdst,
i*tmp_stride2+0*tmp_stride1+k*tmp_stride0,
i*dst_stride2+0*dst_stride1+k*dst_stride0,
tmp_stride1,
dst_stride1,
tmp_height,
dst_height );
}
}
THTensor_(free)(Ttmp);
return 0;
}
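/*
 * Nearest-neighbor rescale: each destination pixel reads the source at
 * (i*src_width/dst_width, j*src_height/dst_height), truncated and clamped
 * to the last source row/column.
 */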
static int image_(Main_scaleSimple)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float scx, scy;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "image.scale: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "image.scale: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( (Tdst->nDimension==3 && ( src_depth!=dst_depth)) ||
(Tdst->nDimension!=Tsrc->nDimension) ) {
printf("image.scale:%d,%d,%ld,%ld\n",Tsrc->nDimension,Tdst->nDimension,src_depth,dst_depth);
luaL_error(L, "image.scale: src and dst depths do not match");
}
if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.scale: src and dst depths do not match");
/* printf("%d,%d -> %d,%d\n",src_width,src_height,dst_width,dst_height); */
scx=((float)src_width)/((float)dst_width);
scy=((float)src_height)/((float)dst_height);
#pragma omp parallel for private(j, i, k)
for(j = 0; j < dst_height; j++) {
for(i = 0; i < dst_width; i++) {
float val = 0.0;
long ii=(long) (((float)i)*scx);
long jj=(long) (((float)j)*scy);
if(ii>src_width-1) ii=src_width-1;
if(jj>src_height-1) jj=src_height-1;
if(Tsrc->nDimension==2)
{
val=src[ii*src_stride2+jj*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
for(k=0;k<src_depth;k++)
{
val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
return 0;
}
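/*
 * Rotation by inverse mapping: each destination pixel is mapped through the
 * rotation about the image center to a source location, and the nearest
 * source pixel (rounded) is copied; destination pixels whose source location
 * falls outside the image are set to 0.
 */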
static int image_(Main_rotate)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
float theta = luaL_checknumber(L, 3);
float cos_theta, sin_theta;
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float xc, yc;
float id,jd;
long ii,jj;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
if (dst == src) {
luaL_error(L, "image.rotate: in-place rotate not supported");
}
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.rotate: src and dst depths do not match");
if( (Tsrc->nDimension!=Tdst->nDimension) )
luaL_error(L, "image.rotate: src and dst depths do not match");
xc = (src_width-1)/2.0;
yc = (src_height-1)/2.0;
sin_theta = sin(theta);
cos_theta = cos(theta);
for(j = 0; j < dst_height; j++) {
jd=j;
for(i = 0; i < dst_width; i++) {
float val = -1;
id= i;
ii = (long) round(cos_theta*(id-xc) - sin_theta*(jd-yc) + xc);
jj = (long) round(cos_theta*(jd-yc) + sin_theta*(id-xc) + yc);
/* rotated corners are blank */
if(ii>src_width-1) val=0;
if(jj>src_height-1) val=0;
if(ii<0) val=0;
if(jj<0) val=0;
if(Tsrc->nDimension==2)
{
if(val==-1)
val=src[ii*src_stride2+jj*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++)
{
if(do_copy)
val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
return 0;
}
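/*
 * Same inverse-mapping rotation as above, but the source value is obtained
 * by bilinear interpolation of the four surrounding pixels; on the image
 * boundary it falls back to the closest valid pixel or to 0.
 */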
static int image_(Main_rotateBilinear)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
float theta = luaL_checknumber(L, 3);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float xc, yc;
float id,jd;
long ii_0, ii_1, jj_0, jj_1;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
if (dst == src) {
luaL_error(L, "image.rotate: in-place rotate not supported");
}
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.rotate: src and dst depths do not match");
if( (Tsrc->nDimension!=Tdst->nDimension) )
luaL_error(L, "image.rotate: src and dst depths do not match");
xc = (src_width-1)/2.0;
yc = (src_height-1)/2.0;
for(j = 0; j < dst_height; j++) {
jd=j;
for(i = 0; i < dst_width; i++) {
float val = -1;
temp_t ri, rj, wi, wj;
id= i;
ri = cos(theta)*(id-xc)-sin(theta)*(jd-yc);
rj = cos(theta)*(jd-yc)+sin(theta)*(id-xc);
ii_0 = (long)floor(ri+xc);
ii_1 = ii_0 + 1;
jj_0 = (long)floor(rj+yc);
jj_1 = jj_0 + 1;
wi = ri+xc-ii_0;
wj = rj+yc-jj_0;
/* default to the closest value when interpolating on image boundaries (either image pixel or 0) */
if(ii_1==src_width && wi<0.5) ii_1 = ii_0;
else if(ii_1>=src_width) val=0;
if(jj_1==src_height && wj<0.5) jj_1 = jj_0;
else if(jj_1>=src_height) val=0;
if(ii_0==-1 && wi>0.5) ii_0 = ii_1;
else if(ii_0<0) val=0;
if(jj_0==-1 && wj>0.5) jj_0 = jj_1;
else if(jj_0<0) val=0;
if(Tsrc->nDimension==2) {
if(val==-1)
val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
+ wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
+ (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
+ wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
} else {
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++) {
if(do_copy) {
val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
+ wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
+ (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
+ wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
}
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
return 0;
}
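/*
 * Cartesian-to-polar resampling (nearest neighbor): destination rows map to
 * angle and columns to radius, and each destination pixel samples the source
 * at the corresponding location around the image center. With doFull the
 * maximum radius reaches the image corners, otherwise the nearest edge.
 * Samples outside the source are set to 0.
 */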
static int image_(Main_polar)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
float doFull = luaL_checknumber(L, 3);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float id, jd, a, r, m, midY, midX;
long ii,jj;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
if( (Tsrc->nDimension!=Tdst->nDimension) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
// compute maximum distance
midY = (float) src_height / 2.0;
midX = (float) src_width / 2.0;
if(doFull == 1) {
m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
}
else {
m = (src_width < src_height) ? midX : midY;
}
// loop to fill polar image
for(j = 0; j < dst_height; j++) { // orientation loop
jd = (float) j;
a = (2 * M_PI * jd) / (float) dst_height; // current angle
for(i = 0; i < dst_width; i++) { // radius loop
float val = -1;
id = (float) i;
r = (m * id) / (float) dst_width; // current distance
jj = (long) floor( r * cos(a) + midY); // y-location in source image
ii = (long) floor(-r * sin(a) + midX); // x-location in source image
if(ii>src_width-1) val=0;
if(jj>src_height-1) val=0;
if(ii<0) val=0;
if(jj<0) val=0;
if(Tsrc->nDimension==2)
{
if(val==-1)
val=src[ii*src_stride2+jj*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++)
{
if(do_copy)
val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
return 0;
}
static int image_(Main_polarBilinear)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
float doFull = luaL_checknumber(L, 3);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float id, jd, a, r, m, midY, midX;
long ii_0, ii_1, jj_0, jj_1;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
if( (Tsrc->nDimension!=Tdst->nDimension) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
// compute maximum distance
midY = (float) src_height / 2.0;
midX = (float) src_width / 2.0;
if(doFull == 1) {
m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
}
else {
m = (src_width < src_height) ? midX : midY;
}
// loop to fill polar image
for(j = 0; j < dst_height; j++) { // orientation loop
jd = (float) j;
a = (2 * M_PI * jd) / (float) dst_height; // current angle
for(i = 0; i < dst_width; i++) { // radius loop
float val = -1;
temp_t ri, rj, wi, wj;
id = (float) i;
r = (m * id) / (float) dst_width; // current distance
rj = r * cos(a) + midY; // y-location in source image
ri = -r * sin(a) + midX; // x-location in source image
ii_0=(long)floor(ri);
ii_1=ii_0 + 1;
jj_0=(long)floor(rj);
jj_1=jj_0 + 1;
wi = ri - ii_0;
wj = rj - jj_0;
// switch to nearest interpolation when bilinear is impossible
if(ii_1>src_width-1 || jj_1>src_height-1 || ii_0<0 || jj_0<0) {
if(ii_0>src_width-1) val=0;
if(jj_0>src_height-1) val=0;
if(ii_0<0) val=0;
if(jj_0<0) val=0;
if(Tsrc->nDimension==2)
{
if(val==-1)
val=src[ii_0*src_stride2+jj_0*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++)
{
if(do_copy)
val=src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
// bilinear interpolation
else {
if(Tsrc->nDimension==2) {
if(val==-1)
val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
+ wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
+ (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
+ wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
} else {
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++) {
if(do_copy) {
val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
+ wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
+ (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
+ wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
}
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
}
return 0;
}
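/*
 * Log-polar counterpart of Main_polar (nearest-neighbor sampling): the radius
 * grows exponentially with the column index, r = exp(i * log(m) / dst_width),
 * so equal column steps correspond to equal radius ratios.
 */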
static int image_(Main_logPolar)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
float doFull = luaL_checknumber(L, 3);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float id, jd, a, r, m, midY, midX, fw;
long ii,jj;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
if( (Tsrc->nDimension!=Tdst->nDimension) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
// compute maximum distance
midY = (float) src_height / 2.0;
midX = (float) src_width / 2.0;
if(doFull == 1) {
m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
}
else {
m = (src_width < src_height) ? midX : midY;
}
// loop to fill polar image
fw = log(m) / (float) dst_width;
for(j = 0; j < dst_height; j++) { // orientation loop
jd = (float) j;
a = (2 * M_PI * jd) / (float) dst_height; // current angle
for(i = 0; i < dst_width; i++) { // radius loop
float val = -1;
id = (float) i;
r = exp(id * fw);
jj = (long) floor( r * cos(a) + midY); // y-location in source image
ii = (long) floor(-r * sin(a) + midX); // x-location in source image
if(ii>src_width-1) val=0;
if(jj>src_height-1) val=0;
if(ii<0) val=0;
if(jj<0) val=0;
if(Tsrc->nDimension==2)
{
if(val==-1)
val=src[ii*src_stride2+jj*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++)
{
if(do_copy)
val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
return 0;
}
static int image_(Main_logPolarBilinear)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
float doFull = luaL_checknumber(L, 3);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
float id, jd, a, r, m, midY, midX, fw;
long ii_0, ii_1, jj_0, jj_1;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "polar: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "polar: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tsrc->nDimension==3 && Tdst->nDimension==3 && ( src_depth!=dst_depth) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
if( (Tsrc->nDimension!=Tdst->nDimension) ) {
luaL_error(L, "image.polar: src and dst depths do not match"); }
// compute maximum distance
midY = (float) src_height / 2.0;
midX = (float) src_width / 2.0;
if(doFull == 1) {
m = sqrt((float) src_width * (float) src_width + (float) src_height * (float) src_height) / 2.0;
}
else {
m = (src_width < src_height) ? midX : midY;
}
// loop to fill polar image
fw = log(m) / (float) dst_width;
for(j = 0; j < dst_height; j++) { // orientation loop
jd = (float) j;
a = (2 * M_PI * jd) / (float) dst_height; // current angle
for(i = 0; i < dst_width; i++) { // radius loop
float val = -1;
float ri, rj, wi, wj;
id = (float) i;
r = exp(id * fw);
rj = r * cos(a) + midY; // y-location in source image
ri = -r * sin(a) + midX; // x-location in source image
ii_0=(long)floor(ri);
ii_1=ii_0 + 1;
jj_0=(long)floor(rj);
jj_1=jj_0 + 1;
wi = ri - ii_0;
wj = rj - jj_0;
// switch to nearest interpolation when bilinear is impossible
if(ii_1>src_width-1 || jj_1>src_height-1 || ii_0<0 || jj_0<0) {
if(ii_0>src_width-1) val=0;
if(jj_0>src_height-1) val=0;
if(ii_0<0) val=0;
if(jj_0<0) val=0;
if(Tsrc->nDimension==2)
{
if(val==-1)
val=src[ii_0*src_stride2+jj_0*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++)
{
if(do_copy)
val=src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
// bilinear interpolation
else {
if(Tsrc->nDimension==2) {
if(val==-1)
val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1]
+ wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1]
+ (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1]
+ wi * wj * src[ii_1*src_stride2+jj_1*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
} else {
int do_copy=0; if(val==-1) do_copy=1;
for(k=0;k<src_depth;k++) {
if(do_copy) {
val = (1.0 - wi) * (1.0 - wj) * src[ii_0*src_stride2+jj_0*src_stride1+k*src_stride0]
+ wi * (1.0 - wj) * src[ii_1*src_stride2+jj_0*src_stride1+k*src_stride0]
+ (1.0 - wi) * wj * src[ii_0*src_stride2+jj_1*src_stride1+k*src_stride0]
+ wi * wj * src[ii_1*src_stride2+jj_1*src_stride1+k*src_stride0];
}
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
}
return 0;
}
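/*
 * Copies the dst-sized window of src whose top-left corner is at
 * (startx, starty); errors out if the window does not fit inside src
 * or if the depths differ.
 */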
static int image_(Main_cropNoScale)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
long startx = luaL_checklong(L, 3);
long starty = luaL_checklong(L, 4);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 0;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 0;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 0;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 0;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( startx<0 || starty<0 || (startx+dst_width>src_width) || (starty+dst_height>src_height))
luaL_error(L, "image.crop: crop goes outside bounds of src");
if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.crop: src and dst depths do not match");
for(j = 0; j < dst_height; j++) {
for(i = 0; i < dst_width; i++) {
float val = 0.0;
long ii=i+startx;
long jj=j+starty;
if(Tsrc->nDimension==2)
{
val=src[ii*src_stride2+jj*src_stride1];
dst[i*dst_stride2+j*dst_stride1] = image_(FromIntermediate)(val);
}
else
{
for(k=0;k<src_depth;k++)
{
val=src[ii*src_stride2+jj*src_stride1+k*src_stride0];
dst[i*dst_stride2+j*dst_stride1+k*dst_stride0] = image_(FromIntermediate)(val);
}
}
}
}
return 0;
}
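/*
 * Copies src into dst shifted by (shiftx, shifty); source pixels whose
 * shifted position falls outside dst are simply dropped, and untouched
 * destination pixels keep their previous contents.
 */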
static int image_(Main_translate)(lua_State *L)
{
THTensor *Tsrc = luaT_checkudata(L, 1, torch_Tensor);
THTensor *Tdst = luaT_checkudata(L, 2, torch_Tensor);
long shiftx = luaL_checklong(L, 3);
long shifty = luaL_checklong(L, 4);
real *src, *dst;
long dst_stride0, dst_stride1, dst_stride2, dst_width, dst_height, dst_depth;
long src_stride0, src_stride1, src_stride2, src_width, src_height, src_depth;
long i, j, k;
luaL_argcheck(L, Tsrc->nDimension==2 || Tsrc->nDimension==3, 1, "rotate: src not 2 or 3 dimensional");
luaL_argcheck(L, Tdst->nDimension==2 || Tdst->nDimension==3, 2, "rotate: dst not 2 or 3 dimensional");
src= THTensor_(data)(Tsrc);
dst= THTensor_(data)(Tdst);
dst_stride0 = 1;
dst_stride1 = Tdst->stride[Tdst->nDimension-2];
dst_stride2 = Tdst->stride[Tdst->nDimension-1];
dst_depth = 1;
dst_height = Tdst->size[Tdst->nDimension-2];
dst_width = Tdst->size[Tdst->nDimension-1];
if(Tdst->nDimension == 3) {
dst_stride0 = Tdst->stride[0];
dst_depth = Tdst->size[0];
}
src_stride0 = 1;
src_stride1 = Tsrc->stride[Tsrc->nDimension-2];
src_stride2 = Tsrc->stride[Tsrc->nDimension-1];
src_depth = 1;
src_height = Tsrc->size[Tsrc->nDimension-2];
src_width = Tsrc->size[Tsrc->nDimension-1];
if(Tsrc->nDimension == 3) {
src_stride0 = Tsrc->stride[0];
src_depth = Tsrc->size[0];
}
if( Tdst->nDimension==3 && ( src_depth!=dst_depth) )
luaL_error(L, "image.translate: src and dst depths do not match");
for(j = 0; j < src_height; j++) {
for(i = 0; i < src_width; i++) {
long ii=i+shiftx;
long jj=j+shifty;
// Check it's within destination bounds, else crop
if(ii<dst_width && jj<dst_height && ii>=0 && jj>=0) {
for(k=0;k<src_depth;k++) {
dst[ii*dst_stride2+jj*dst_stride1+k*dst_stride0] = src[i*src_stride2+j*src_stride1+k*src_stride0];
}
}
}
}
return 0;
}
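/*
 * Clamps every element of the input tensor to [0, 1] in place
 * (a no-op for byte tensors, which are already confined to [0, 255]).
 */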
static int image_(Main_saturate)(lua_State *L) {
#ifdef TH_REAL_IS_BYTE
// Noop since necessarily constrained to [0, 255].
#else
THTensor *input = luaT_checkudata(L, 1, torch_Tensor);
THTensor *output = input;
TH_TENSOR_APPLY2(real, output, real, input, \
*output_data = (*input_data < 0) ? 0 : (*input_data > 1) ? 1 : *input_data;)
#endif
return 1;
}
/*
* Converts an RGB color value to HSL. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSL_color_space.
* Assumes r, g, and b are contained in the set [0, 1] and
* returns h, s, and l in the set [0, 1].
*/
int image_(Main_rgb2hsl)(lua_State *L) {
THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
THTensor *hsl = luaT_checkudata(L, 2, torch_Tensor);
int y,x;
temp_t r, g, b, h, s, l;
for (y=0; y<rgb->size[1]; y++) {
for (x=0; x<rgb->size[2]; x++) {
// get RGB
r = THTensor_(get3d)(rgb, 0, y, x);
g = THTensor_(get3d)(rgb, 1, y, x);
b = THTensor_(get3d)(rgb, 2, y, x);
#ifdef TH_REAL_IS_BYTE
r /= 255;
g /= 255;
b /= 255;
#endif
temp_t mx = max(max(r, g), b);
temp_t mn = min(min(r, g), b);
if(mx == mn) {
h = 0; // achromatic
s = 0;
l = mx;
} else {
temp_t d = mx - mn;
if (mx == r) {
h = (g - b) / d + (g < b ? 6 : 0);
} else if (mx == g) {
h = (b - r) / d + 2;
} else {
h = (r - g) / d + 4;
}
h /= 6;
l = (mx + mn) / 2;
s = l > 0.5 ? d / (2 - mx - mn) : d / (mx + mn);
}
// set hsl
#ifdef TH_REAL_IS_BYTE
h *= 255;
s *= 255;
l *= 255;
#endif
THTensor_(set3d)(hsl, 0, y, x, image_(FromIntermediate)(h));
THTensor_(set3d)(hsl, 1, y, x, image_(FromIntermediate)(s));
THTensor_(set3d)(hsl, 2, y, x, image_(FromIntermediate)(l));
}
}
return 0;
}
// helper
static inline temp_t image_(hue2rgb)(temp_t p, temp_t q, temp_t t) {
if (t < 0.) t += 1;
if (t > 1.) t -= 1;
if (t < 1./6)
return p + (q - p) * 6. * t;
else if (t < 1./2)
return q;
else if (t < 2./3)
return p + (q - p) * (2./3 - t) * 6.;
else
return p;
}
/*
* Converts an HSL color value to RGB. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSL_color_space.
* Assumes h, s, and l are contained in the set [0, 1] and
* returns r, g, and b in the set [0, 1].
*/
int image_(Main_hsl2rgb)(lua_State *L) {
THTensor *hsl = luaT_checkudata(L, 1, torch_Tensor);
THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
int y,x;
temp_t r, g, b, h, s, l;
for (y=0; y<hsl->size[1]; y++) {
for (x=0; x<hsl->size[2]; x++) {
// get hsl
h = THTensor_(get3d)(hsl, 0, y, x);
s = THTensor_(get3d)(hsl, 1, y, x);
l = THTensor_(get3d)(hsl, 2, y, x);
#ifdef TH_REAL_IS_BYTE
h /= 255;
s /= 255;
l /= 255;
#endif
if(s == 0) {
// achromatic
r = l;
g = l;
b = l;
} else {
temp_t q = (l < 0.5) ? (l * (1 + s)) : (l + s - l * s);
temp_t p = 2 * l - q;
temp_t hr = h + 1./3;
temp_t hg = h;
temp_t hb = h - 1./3;
r = image_(hue2rgb)(p, q, hr);
g = image_(hue2rgb)(p, q, hg);
b = image_(hue2rgb)(p, q, hb);
}
// set rgb
#ifdef TH_REAL_IS_BYTE
r *= 255;
g *= 255;
b *= 255;
#endif
THTensor_(set3d)(rgb, 0, y, x, image_(FromIntermediate)(r));
THTensor_(set3d)(rgb, 1, y, x, image_(FromIntermediate)(g));
THTensor_(set3d)(rgb, 2, y, x, image_(FromIntermediate)(b));
}
}
return 0;
}
/*
* Converts an RGB color value to HSV. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSV_color_space.
* Assumes r, g, and b are contained in the set [0, 1] and
* returns h, s, and v in the set [0, 1].
*/
int image_(Main_rgb2hsv)(lua_State *L) {
THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
THTensor *hsv = luaT_checkudata(L, 2, torch_Tensor);
int y, x;
temp_t r, g, b, h, s, v;
for (y=0; y<rgb->size[1]; y++) {
for (x=0; x<rgb->size[2]; x++) {
// get RGB
r = THTensor_(get3d)(rgb, 0, y, x);
g = THTensor_(get3d)(rgb, 1, y, x);
b = THTensor_(get3d)(rgb, 2, y, x);
#ifdef TH_REAL_IS_BYTE
r /= 255;
g /= 255;
b /= 255;
#endif
temp_t mx = max(max(r, g), b);
temp_t mn = min(min(r, g), b);
if(mx == mn) {
// achromatic
h = 0;
s = 0;
v = mx;
} else {
temp_t d = mx - mn;
if (mx == r) {
h = (g - b) / d + (g < b ? 6 : 0);
} else if (mx == g) {
h = (b - r) / d + 2;
} else {
h = (r - g) / d + 4;
}
h /= 6;
s = d / mx;
v = mx;
}
// set hsv
#ifdef TH_REAL_IS_BYTE
h *= 255;
s *= 255;
v *= 255;
#endif
THTensor_(set3d)(hsv, 0, y, x, image_(FromIntermediate)(h));
THTensor_(set3d)(hsv, 1, y, x, image_(FromIntermediate)(s));
THTensor_(set3d)(hsv, 2, y, x, image_(FromIntermediate)(v));
}
}
return 0;
}
/*
* Converts an HSV color value to RGB. Conversion formula
* adapted from http://en.wikipedia.org/wiki/HSV_color_space.
 * Assumes h, s, and v are contained in the set [0, 1] and
* returns r, g, and b in the set [0, 1].
*/
int image_(Main_hsv2rgb)(lua_State *L) {
THTensor *hsv = luaT_checkudata(L, 1, torch_Tensor);
THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
int y, x;
temp_t r, g, b, h, s, v;
for (y=0; y<hsv->size[1]; y++) {
for (x=0; x<hsv->size[2]; x++) {
// get hsv
h = THTensor_(get3d)(hsv, 0, y, x);
s = THTensor_(get3d)(hsv, 1, y, x);
v = THTensor_(get3d)(hsv, 2, y, x);
#ifdef TH_REAL_IS_BYTE
h /= 255;
s /= 255;
v /= 255;
#endif
int i = floor(h*6.);
temp_t f = h*6-i;
temp_t p = v*(1-s);
temp_t q = v*(1-f*s);
temp_t t = v*(1-(1-f)*s);
switch (i % 6) {
case 0: r = v, g = t, b = p; break;
case 1: r = q, g = v, b = p; break;
case 2: r = p, g = v, b = t; break;
case 3: r = p, g = q, b = v; break;
case 4: r = t, g = p, b = v; break;
case 5: r = v, g = p, b = q; break;
default: r = 0, g = 0, b = 0; break;
}
// set rgb
#ifdef TH_REAL_IS_BYTE
r *= 255;
g *= 255;
b *= 255;
#endif
THTensor_(set3d)(rgb, 0, y, x, image_(FromIntermediate)(r));
THTensor_(set3d)(rgb, 1, y, x, image_(FromIntermediate)(g));
THTensor_(set3d)(rgb, 2, y, x, image_(FromIntermediate)(b));
}
}
return 0;
}
#ifndef TH_REAL_IS_BYTE
/*
* Convert an sRGB color channel to a linear sRGB color channel.
*/
static inline real image_(gamma_expand_sRGB)(real nonlinear)
{
return (nonlinear <= 0.04045) ? (nonlinear / 12.92)
: (pow((nonlinear+0.055)/1.055, 2.4));
}
/*
* Convert a linear sRGB color channel to a sRGB color channel.
*/
static inline real image_(gamma_compress_sRGB)(real linear)
{
return (linear <= 0.0031308) ? (12.92 * linear)
: (1.055 * pow(linear, 1.0/2.4) - 0.055);
}
/*
* Converts an sRGB color value to LAB.
* Based on http://www.brucelindbloom.com/index.html?Equations.html.
* Assumes r, g, and b are contained in the set [0, 1].
* LAB output is NOT restricted to [0, 1]!
*/
int image_(Main_rgb2lab)(lua_State *L) {
THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
THTensor *lab = luaT_checkudata(L, 2, torch_Tensor);
// CIE Standard
double epsilon = 216.0/24389.0;
double k = 24389.0/27.0;
// D65 white point
double xn = 0.950456;
double zn = 1.088754;
int y,x;
real r,g,b,l,a,_b;
for (y=0; y<rgb->size[1]; y++) {
for (x=0; x<rgb->size[2]; x++) {
// get RGB
r = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 0, y, x));
g = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 1, y, x));
b = image_(gamma_expand_sRGB)(THTensor_(get3d)(rgb, 2, y, x));
// sRGB to XYZ
double X = 0.412453 * r + 0.357580 * g + 0.180423 * b;
double Y = 0.212671 * r + 0.715160 * g + 0.072169 * b;
double Z = 0.019334 * r + 0.119193 * g + 0.950227 * b;
// normalize for D65 white point
X /= xn;
Z /= zn;
// XYZ normalized to CIE Lab
double fx = X > epsilon ? pow(X, 1/3.0) : (k * X + 16)/116;
double fy = Y > epsilon ? pow(Y, 1/3.0) : (k * Y + 16)/116;
double fz = Z > epsilon ? pow(Z, 1/3.0) : (k * Z + 16)/116;
l = 116 * fy - 16;
a = 500 * (fx - fy);
_b = 200 * (fy - fz);
// set lab
THTensor_(set3d)(lab, 0, y, x, l);
THTensor_(set3d)(lab, 1, y, x, a);
THTensor_(set3d)(lab, 2, y, x, _b);
}
}
return 0;
}
/*
* Converts an LAB color value to sRGB.
* Based on http://www.brucelindbloom.com/index.html?Equations.html.
* returns r, g, and b in the set [0, 1].
*/
int image_(Main_lab2rgb)(lua_State *L) {
THTensor *lab = luaT_checkudata(L, 1, torch_Tensor);
THTensor *rgb = luaT_checkudata(L, 2, torch_Tensor);
int y,x;
real r,g,b,l,a,_b;
// CIE Standard
double epsilon = 216.0/24389.0;
double k = 24389.0/27.0;
// D65 white point
double xn = 0.950456;
double zn = 1.088754;
for (y=0; y<lab->size[1]; y++) {
for (x=0; x<lab->size[2]; x++) {
// get lab
l = THTensor_(get3d)(lab, 0, y, x);
a = THTensor_(get3d)(lab, 1, y, x);
_b = THTensor_(get3d)(lab, 2, y, x);
// LAB to XYZ
double fy = (l + 16) / 116;
double fz = fy - _b / 200;
double fx = (a / 500) + fy;
double X = pow(fx, 3);
if (X <= epsilon)
X = (116 * fx - 16) / k;
double Y = l > (k * epsilon) ? pow((l + 16) / 116, 3) : l/k;
double Z = pow(fz, 3);
if (Z <= epsilon)
Z = (116 * fz - 16) / k;
X *= xn;
Z *= zn;
// XYZ to sRGB
r = 3.2404542 * X - 1.5371385 * Y - 0.4985314 * Z;
g = -0.9692660 * X + 1.8760108 * Y + 0.0415560 * Z;
b = 0.0556434 * X - 0.2040259 * Y + 1.0572252 * Z;
// set rgb
THTensor_(set3d)(rgb, 0, y, x, image_(gamma_compress_sRGB(r)));
THTensor_(set3d)(rgb, 1, y, x, image_(gamma_compress_sRGB(g)));
THTensor_(set3d)(rgb, 2, y, x, image_(gamma_compress_sRGB(b)));
}
}
return 0;
}
#else
int image_(Main_rgb2lab)(lua_State *L) {
return luaL_error(L, "image.rgb2lab: not supported for torch.ByteTensor");
}
int image_(Main_lab2rgb)(lua_State *L) {
return luaL_error(L, "image.lab2rgb: not supported for torch.ByteTensor");
}
#endif // TH_REAL_IS_BYTE
/* Vertically flip an image */
int image_(Main_vflip)(lua_State *L) {
THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
int width = dst->size[2];
int height = dst->size[1];
int channels = dst->size[0];
long *is = src->stride;
long *os = dst->stride;
// get raw pointers
real *dst_data = THTensor_(data)(dst);
real *src_data = THTensor_(data)(src);
long k, x, y;
if (dst_data != src_data) {
/* not in-place.
* this branch could be removed by first duplicating the src into dst then doing inplace */
#pragma omp parallel for private(k, x, y)
for(k=0; k<channels; k++) {
for (y=0; y<height; y++) {
for (x=0; x<width; x++) {
dst_data[ k*os[0] + (height-1-y)*os[1] + x*os[2] ] = src_data[ k*is[0] + y*is[1] + x*is[2] ];
}
}
}
} else {
/* in-place */
real swap, * src_px, * dst_px;
long half_height = height >> 1;
for(k=0; k<channels; k++) {
for (y=0; y < half_height; y++) {
for (x=0; x<width; x++) {
src_px = src_data + k*is[0] + y*is[1] + x*is[2];
dst_px = dst_data + k*is[0] + (height-1-y)*is[1] + x*is[2];
swap = *dst_px;
*dst_px = *src_px;
*src_px = swap;
}
}
}
}
return 0;
}
/* Horizontally flip an image */
int image_(Main_hflip)(lua_State *L) {
THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
int width = dst->size[2];
int height = dst->size[1];
int channels = dst->size[0];
long *is = src->stride;
long *os = dst->stride;
// get raw pointers
real *dst_data = THTensor_(data)(dst);
real *src_data = THTensor_(data)(src);
long k, x, y;
if (dst_data != src_data) {
/* not in-place.
* this branch could be removed by first duplicating the src into dst then doing inplace */
#pragma omp parallel for private(k, x, y)
for(k=0; k<channels; k++) {
for (y=0; y<height; y++) {
for (x=0; x<width; x++) {
dst_data[ k*os[0] + y*os[1] + (width-x-1)*os[2] ] = src_data[ k*is[0] + y*is[1] + x*is[2] ];
}
}
}
} else {
/* in-place */
real swap, * src_px, * dst_px;
long half_width = width >> 1;
for(k=0; k<channels; k++) {
for (y=0; y < height; y++) {
for (x=0; x<half_width; x++) {
src_px = src_data + k*is[0] + y*is[1] + x*is[2];
dst_px = dst_data + k*is[0] + y*is[1] + (width-x-1)*is[2];
swap = *dst_px;
*dst_px = *src_px;
*src_px = swap;
}
}
}
}
return 0;
}
/* flip an image along a specified dimension */
int image_(Main_flip)(lua_State *L) {
THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
long flip_dim = luaL_checklong(L, 3);
if ((dst->nDimension != 5) || (src->nDimension != 5)) {
luaL_error(L, "image.flip: expected 5 dimensions for src and dst");
}
if (flip_dim < 1 || flip_dim > dst->nDimension || flip_dim > 5) {
luaL_error(L, "image.flip: flip_dim out of bounds");
}
flip_dim--; // Make it zero indexed
// get raw pointers
real *dst_data = THTensor_(data)(dst);
real *src_data = THTensor_(data)(src);
if (dst_data == src_data) {
luaL_error(L, "image.flip: in-place flip not supported");
}
long size0 = dst->size[0];
long size1 = dst->size[1];
long size2 = dst->size[2];
long size3 = dst->size[3];
long size4 = dst->size[4];
if (src->size[0] != size0 || src->size[1] != size1 ||
src->size[2] != size2 || src->size[3] != size3 ||
src->size[4] != size4) {
luaL_error(L, "image.flip: src and dst are not the same size");
}
long *is = src->stride;
long *os = dst->stride;
long x, y, z, d, t, isrc, idst = 0;
for (t = 0; t < size0; t++) {
for (d = 0; d < size1; d++) {
for (z = 0; z < size2; z++) {
for (y = 0; y < size3; y++) {
for (x = 0; x < size4; x++) {
isrc = t*is[0] + d*is[1] + z*is[2] + y*is[3] + x*is[4];
// The big switch statement here looks ugly, however on my machine
// gcc compiles it to a skip list, so it should be fast.
switch (flip_dim) {
case 0:
idst = (size0 - t - 1)*os[0] + d*os[1] + z*os[2] + y*os[3] + x*os[4];
break;
case 1:
idst = t*os[0] + (size1 - d - 1)*os[1] + z*os[2] + y*os[3] + x*os[4];
break;
case 2:
idst = t*os[0] + d*os[1] + (size2 - z - 1)*os[2] + y*os[3] + x*os[4];
break;
case 3:
idst = t*os[0] + d*os[1] + z*os[2] + (size3 - y - 1)*os[3] + x*os[4];
break;
case 4:
idst = t*os[0] + d*os[1] + z*os[2] + y*os[3] + (size4 - x - 1)*os[4];
break;
}
dst_data[ idst ] = src_data[ isrc ];
}
}
}
}
}
return 0;
}
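/*
 * Bicubic sample at the (possibly fractional) source position (ix, iy):
 * applies the Catmull-Rom cubic along x for each of the 4 neighboring rows,
 * then once along y. With bounds_check set, taps that fall outside the image
 * read pad_value instead.
 */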
static inline void image_(Main_bicubicInterpolate)(
real* src, long* is, long* size, temp_t ix, temp_t iy,
real* dst, long *os,
real pad_value, int bounds_check)
{
int i, j, k;
temp_t arr[4], p[4];
// Calculate fractional and integer components
long x_pix = floor(ix);
long y_pix = floor(iy);
temp_t dx = ix - x_pix;
temp_t dy = iy - y_pix;
for (k=0; k<size[0]; k++) {
#pragma unroll
for (i = 0; i < 4; i++) {
long v = y_pix + i - 1;
real* data = &src[k * is[0] + v * is[1]];
#pragma unroll
for (j = 0; j < 4; j++) {
long u = x_pix + j - 1;
if (bounds_check && (v < 0 || v >= size[1] || u < 0 || u >= size[2])) {
p[j] = pad_value;
} else {
p[j] = data[u * is[2]];
}
}
arr[i] = image_(Main_cubicInterpolate)(p[0], p[1], p[2], p[3], dx);
}
temp_t value = image_(Main_cubicInterpolate)(arr[0], arr[1], arr[2], arr[3], dy);
dst[k * os[0]] = image_(FromIntermediate)(value);
}
}
/*
* Warps an image, according to an (x,y) flow field. The flow
 * field is in the space of the destination image; each vector
 * points to a source pixel in the original image.
*/
int image_(Main_warp)(lua_State *L) {
THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
THTensor *src = luaT_checkudata(L, 2, torch_Tensor);
THTensor *flowfield = luaT_checkudata(L, 3, torch_Tensor);
int mode = lua_tointeger(L, 4);
int offset_mode = lua_toboolean(L, 5);
int clamp_mode = lua_tointeger(L, 6);
real pad_value = (real)lua_tonumber(L, 7);
// dims
int width = dst->size[2];
int height = dst->size[1];
int src_width = src->size[2];
int src_height = src->size[1];
int channels = dst->size[0];
long *is = src->stride;
long *os = dst->stride;
long *fs = flowfield->stride;
// get raw pointers
real *dst_data = THTensor_(data)(dst);
real *src_data = THTensor_(data)(src);
real *flow_data = THTensor_(data)(flowfield);
// resample
long k,x,y,v,u,i,j;
#pragma omp parallel for private(k, x, y, v, u, i, j)
for (y=0; y<height; y++) {
for (x=0; x<width; x++) {
// subpixel position:
float flow_y = flow_data[ 0*fs[0] + y*fs[1] + x*fs[2] ];
float flow_x = flow_data[ 1*fs[0] + y*fs[1] + x*fs[2] ];
float iy = offset_mode*y + flow_y;
float ix = offset_mode*x + flow_x;
// borders
int off_image = 0;
if (iy < 0 || iy > src_height - 1 ||
ix < 0 || ix > src_width - 1) {
off_image = 1;
}
if (off_image == 1 && clamp_mode == 1) {
// We're off the image and we're clamping the input image to 0
for (k=0; k<channels; k++) {
dst_data[ k*os[0] + y*os[1] + x*os[2] ] = pad_value;
}
} else {
ix = MAX(ix,0); ix = MIN(ix,src_width-1);
iy = MAX(iy,0); iy = MIN(iy,src_height-1);
// bilinear?
switch (mode) {
case 1: // Bilinear interpolation
{
// 4 nearest neighbors:
long ix_nw = floor(ix);
long iy_nw = floor(iy);
long ix_ne = ix_nw + 1;
long iy_ne = iy_nw;
long ix_sw = ix_nw;
long iy_sw = iy_nw + 1;
long ix_se = ix_nw + 1;
long iy_se = iy_nw + 1;
// get surfaces to each neighbor:
temp_t nw = (ix_se-ix)*(iy_se-iy);
temp_t ne = (ix-ix_sw)*(iy_sw-iy);
temp_t sw = (ix_ne-ix)*(iy-iy_ne);
temp_t se = (ix-ix_nw)*(iy-iy_nw);
// weighted sum of neighbors:
for (k=0; k<channels; k++) {
dst_data[ k*os[0] + y*os[1] + x*os[2] ] = image_(FromIntermediate)(
src_data[ k*is[0] + iy_nw*is[1] + ix_nw*is[2] ] * nw
+ src_data[ k*is[0] + iy_ne*is[1] + MIN(ix_ne,src_width-1)*is[2] ] * ne
+ src_data[ k*is[0] + MIN(iy_sw,src_height-1)*is[1] + ix_sw*is[2] ] * sw
+ src_data[ k*is[0] + MIN(iy_se,src_height-1)*is[1] + MIN(ix_se,src_width-1)*is[2] ] * se);
}
}
break;
case 0: // Simple (i.e., nearest neighbor)
{
// 1 nearest neighbor:
long ix_n = floor(ix+0.5);
long iy_n = floor(iy+0.5);
// weighted sum of neighbors:
for (k=0; k<channels; k++) {
dst_data[ k*os[0] + y*os[1] + x*os[2] ] = src_data[ k*is[0] + iy_n*is[1] + ix_n*is[2] ];
}
}
break;
case 2: // Bicubic
{
// We only need to do bounds checking if ix or iy are near the edge
int edge = !(iy >= 1 && iy < src_height - 2 && ix >= 1 && ix < src_width - 2);
real* dst = dst_data + y*os[1] + x*os[2];
if (edge) {
image_(Main_bicubicInterpolate)(src_data, is, src->size, ix, iy, dst, os, pad_value, 1);
} else {
image_(Main_bicubicInterpolate)(src_data, is, src->size, ix, iy, dst, os, pad_value, 0);
}
}
break;
case 3: // Lanczos
{
// Note: Lanczos can be made fast if the resampling period is
// constant... and therefore the Lu, Lv can be cached and reused.
// However, unfortunately warp makes no assumptions about resampling
// and so we need to perform the O(k^2) convolution on each pixel AND
// we have to re-calculate the kernel for every pixel.
// See wikipedia for more info.
// It is however an extremely good approximation to the full sinc
// interpolation (IIR) filter.
// Another note is that the version here has been optimized using
// pretty aggressive code flow and explicit inlining. It might not
// be very readable (contact me, Jonathan Tompson, if it is not)
// Calculate fractional and integer components
long x_pix = floor(ix);
long y_pix = floor(iy);
// Precalculate the L(x) function evaluations in the u and v direction
#define rad (3) // This is a tunable parameter: 2 to 3 is OK
float Lu[2 * rad]; // L(x) for u direction
float Lv[2 * rad]; // L(x) for v direction
for (u=x_pix-rad+1, i=0; u<=x_pix+rad; u++, i++) {
float du = ix - (float)u; // Lanczos kernel x value
du = du < 0 ? -du : du; // prefer not to use std fabsf
if (du < 0.000001f) { // TODO: Is there a real eps standard?
Lu[i] = 1;
} else if (du > (float)rad) {
Lu[i] = 0;
} else {
Lu[i] = ((float)rad * sin((float)M_PI * du) *
sin((float)M_PI * du / (float)rad)) /
((float)(M_PI * M_PI) * du * du);
}
}
for (v=y_pix-rad+1, i=0; v<=y_pix+rad; v++, i++) {
float dv = iy - (float)v; // Lanczos kernel x value
dv = dv < 0 ? -dv : dv; // prefer not to use std fabsf
if (dv < 0.000001f) { // TODO: Is there a real eps standard?
Lv[i] = 1;
} else if (dv > (float)rad) {
Lv[i] = 0;
} else {
Lv[i] = ((float)rad * sin((float)M_PI * dv) *
sin((float)M_PI * dv / (float)rad)) /
((float)(M_PI * M_PI) * dv * dv);
}
}
float sum_weights = 0;
for (u=0; u<2*rad; u++) {
for (v=0; v<2*rad; v++) {
sum_weights += (Lu[u] * Lv[v]);
}
}
for (k=0; k<channels; k++) {
temp_t result = 0;
for (u=x_pix-rad+1, i=0; u<=x_pix+rad; u++, i++) {
long curu = MAX(MIN((long)(src_width-1), u), 0);
for (v=y_pix-rad+1, j=0; v<=y_pix+rad; v++, j++) {
long curv = MAX(MIN((long)(src_height-1), v), 0);
temp_t Suv = src_data[k * is[0] + curv * is[1] + curu * is[2]];
temp_t weight = Lu[i] * Lv[j];
result += (Suv * weight);
}
}
// Normalize by the sum of the weights
result = result / (float)sum_weights;
// Again, I assume that since the image is stored as reals we
// don't have to worry about clamping to min and max int (to
// prevent over or underflow)
dst_data[ k*os[0] + y*os[1] + x*os[2] ] = image_(FromIntermediate)(result);
}
}
break;
} // end switch (mode)
} // end else
}
}
// done
return 0;
}
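// Fills a 2D tensor with a Gaussian bump: dst(v,u) = amplitude * exp(-0.5*(du*du + dv*dv)),
// where du = (u+1-mean_u)/(sigma_u*width) and dv = (v+1-mean_v)/(sigma_v*height).
// If the normalize flag is set, the result is rescaled so that its elements sum to 1.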
int image_(Main_gaussian)(lua_State *L) {
THTensor *dst = luaT_checkudata(L, 1, torch_Tensor);
long width = dst->size[1];
long height = dst->size[0];
long *os = dst->stride;
real *dst_data = THTensor_(data)(dst);
temp_t amplitude = (temp_t)lua_tonumber(L, 2);
int normalize = (int)lua_toboolean(L, 3);
temp_t sigma_u = (temp_t)lua_tonumber(L, 4);
temp_t sigma_v = (temp_t)lua_tonumber(L, 5);
temp_t mean_u = (temp_t)lua_tonumber(L, 6) * width + 0.5;
temp_t mean_v = (temp_t)lua_tonumber(L, 7) * height + 0.5;
// Precalculate 1/(sigma*size) for speed (for some stupid reason the pragma
// omp declaration prevents gcc from optimizing the inside loop on my machine:
// verified by checking the assembly output)
temp_t over_sigmau = 1.0 / (sigma_u * width);
temp_t over_sigmav = 1.0 / (sigma_v * height);
long v, u;
temp_t du, dv;
#pragma omp parallel for private(v, u, du, dv)
for (v = 0; v < height; v++) {
for (u = 0; u < width; u++) {
du = (u + 1 - mean_u) * over_sigmau;
dv = (v + 1 - mean_v) * over_sigmav;
temp_t value = amplitude * exp(-0.5 * (du*du + dv*dv));
dst_data[ v*os[0] + u*os[1] ] = image_(FromIntermediate)(value);
}
}
if (normalize) {
temp_t sum = 0;
// We could parallelize this, but it's more trouble than it's worth
for(v = 0; v < height; v++) {
for(u = 0; u < width; u++) {
sum += dst_data[ v*os[0] + u*os[1] ];
}
}
temp_t one_over_sum = 1.0 / sum;
#pragma omp parallel for private(v, u)
for(v = 0; v < height; v++) {
for(u = 0; u < width; u++) {
dst_data[ v*os[0] + u*os[1] ] *= one_over_sum;
}
}
}
return 0;
}
/*
* Borrowed from github.com/clementfarabet/lua---imgraph
* with Clément's permission for implementing y2jet()
*/
int image_(Main_colorize)(lua_State *L) {
// get args
THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor);
THTensor *input = (THTensor *)luaT_checkudata(L, 2, torch_Tensor);
THTensor *colormap = (THTensor *)luaT_checkudata(L, 3, torch_Tensor);
// dims
long height = input->size[0];
long width = input->size[1];
// generate color map if not given
int noColorMap = THTensor_(nElement)(colormap) == 0;
if (noColorMap) {
THTensor_(resize2d)(colormap, width*height, 3);
THTensor_(fill)(colormap, -1);
}
// colormap channels
int channels = colormap->size[1];
// generate output
THTensor_(resize3d)(output, channels, height, width);
int x,y,k;
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
int id = THTensor_(get2d)(input, y, x);
if (noColorMap) {
for (k = 0; k < channels; k++) {
temp_t value = (float)rand() / (float)RAND_MAX;
#ifdef TH_REAL_IS_BYTE
value *= 255;
#endif
THTensor_(set2d)(colormap, id, k, image_(FromIntermediate)(value));
}
}
for (k = 0; k < channels; k++) {
real color = THTensor_(get2d)(colormap, id, k);
THTensor_(set3d)(output, k, y, x, color);
}
}
}
// return nothing
return 0;
}
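// Converts a 3xHxW RGB tensor to a single-channel luminance map using the
// Rec. 601 weights: Y = 0.299*R + 0.587*G + 0.114*B.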
int image_(Main_rgb2y)(lua_State *L) {
THTensor *rgb = luaT_checkudata(L, 1, torch_Tensor);
THTensor *yim = luaT_checkudata(L, 2, torch_Tensor);
luaL_argcheck(L, rgb->nDimension == 3, 1, "image.rgb2y: src not 3D");
luaL_argcheck(L, yim->nDimension == 2, 2, "image.rgb2y: dst not 2D");
luaL_argcheck(L, rgb->size[1] == yim->size[0], 2,
"image.rgb2y: src and dst not of same height");
luaL_argcheck(L, rgb->size[2] == yim->size[1], 2,
"image.rgb2y: src and dst not of same width");
int y, x;
temp_t r, g, b, yc;
const int height = rgb->size[1];
const int width = rgb->size[2];
for (y=0; y<height; y++) {
for (x=0; x<width; x++) {
// get RGB
r = THTensor_(get3d)(rgb, 0, y, x);
g = THTensor_(get3d)(rgb, 1, y, x);
b = THTensor_(get3d)(rgb, 2, y, x);
yc = 0.299 * r + 0.587 * g + 0.114 * b;
THTensor_(set2d)(yim, y, x, image_(FromIntermediate)(yc));
}
}
return 0;
}
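// Writes one RGB pixel at (y,x); the color components are given as 0-255 integers
// and are divided by 255 for non-byte (floating point) tensors.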
static inline void image_(drawPixel)(THTensor *output, int y, int x,
int cr, int cg, int cb) {
#ifdef TH_REAL_IS_BYTE
THTensor_(set3d)(output, 0, y, x, cr);
THTensor_(set3d)(output, 1, y, x, cg);
THTensor_(set3d)(output, 2, y, x, cb);
#else
THTensor_(set3d)(output, 0, y, x, cr / 255.0f);
THTensor_(set3d)(output, 1, y, x, cg / 255.0f);
THTensor_(set3d)(output, 2, y, x, cb / 255.0f);
#endif
}
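// Renders a single character from the built-in 5x8 bitmap font (image_ada_font) at (x,y),
// scaled by 'size'. The glyph is drawn in (cr,cg,cb); the background is filled with
// (bg_cr,bg_cg,bg_cb) unless any of those is -1. Characters fully outside the image are skipped.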
static inline void image_(drawChar)(THTensor *output, int x, int y, unsigned char c, int size,
int cr, int cg, int cb,
int bg_cr, int bg_cg, int bg_cb) {
long channels = output->size[0];
long height = output->size[1];
long width = output->size[2];
/* out of bounds condition, return without drawing */
if((x >= width) || // Clip right
(y >= height) || // Clip bottom
((x + 6 * size - 1) < 0) || // Clip left
((y + 8 * size - 1) < 0)) // Clip top
return;
for(char i = 0; i < 6; i++ ) {
unsigned char line;
if (i < 5) {
line = *(const unsigned char *)(image_ada_font+(c*5) + i);
} else {
line = 0x0;
}
for(char j = 0; j < 8; j++, line >>= 1) {
if(line & 0x1) {
if (size == 1) {
image_(drawPixel)(output, y+j, x+i, cr, cg, cb);
}
else {
for (int ii = x+(i*size); ii < x+(i*size) + size; ii++) {
for (int jj = y+(j*size); jj < y+(j*size) + size; jj++) {
image_(drawPixel)(output, jj, ii, cr, cg, cb);
}
}
}
} else if (bg_cr != -1 && bg_cg != -1 && bg_cb != -1) {
if (size == 1) {
image_(drawPixel)(output, y+j, x+i, bg_cr, bg_cg, bg_cb);
} else {
for (int ii = x+(i*size); ii < x+(i*size) + size; ii++) {
for (int jj = y+(j*size); jj < y+(j*size) + size; jj++) {
image_(drawPixel)(output, jj, ii, bg_cr, bg_cg, bg_cb);
}
}
}
}
}
}
}
#ifndef luaL_checkint
#define luaL_checkint(l,arg) (int)luaL_checkinteger(l,arg)
#endif
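// Draws a text string onto an image tensor. '\n' moves the cursor to the next line,
// '\r' is ignored, and when 'wrap' is set the cursor wraps back to column 0 before a
// glyph would run past the right border. Each glyph advances the cursor by 6*size pixels.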
int image_(Main_drawtext)(lua_State *L) {
// get args
THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor);
const char* text = lua_tostring(L, 2);
long x = luaL_checklong(L, 3);
long y = luaL_checklong(L, 4);
int size = luaL_checkint(L, 5);
int cr = luaL_checkint(L, 6);
int cg = luaL_checkint(L, 7);
int cb = luaL_checkint(L, 8);
int bg_cr = luaL_checkint(L, 9);
int bg_cg = luaL_checkint(L, 10);
int bg_cb = luaL_checkint(L, 11);
int wrap = luaL_checkint(L, 12);
long len = strlen(text);
// dims
long channels = output->size[0];
long height = output->size[1];
long width = output->size[2];
long cursor_y = y;
long cursor_x = x;
for (long cnt = 0; cnt < len; cnt++) {
unsigned char c = text[cnt];
if(c == '\n') {
cursor_y += size*8;
cursor_x = x;
} else if(c == '\r') {
// skip em
} else {
if(wrap && ((cursor_x + size * 6) >= width)) { // Heading off edge?
cursor_x = 0; // Reset x to zero
cursor_y += size * 8; // Advance y one line
}
image_(drawChar)(output, cursor_x, cursor_y, c, size,
cr, cg, cb,
bg_cr, bg_cg, bg_cb);
cursor_x += size * 6;
}
}
return 0;
}
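// Draws the outline of an axis-aligned rectangle between (x1,y1) and (x2,y2) with the
// requested line width, by painting two vertical and two horizontal bands clamped to
// the image bounds.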
int image_(Main_drawRect)(lua_State *L) {
THTensor *output = (THTensor *)luaT_checkudata(L, 1, torch_Tensor);
long x1long = luaL_checklong(L, 2);
long y1long = luaL_checklong(L, 3);
long x2long = luaL_checklong(L, 4);
long y2long = luaL_checklong(L, 5);
int lineWidth = luaL_checkint(L, 6);
int cr = luaL_checkint(L, 7);
int cg = luaL_checkint(L, 8);
int cb = luaL_checkint(L, 9);
int loffset = lineWidth / 2 + 1;
int uoffset = lineWidth - loffset - 1;
int x1l = (int) MAX(0, x1long - loffset);
int y1l = (int) MAX(0, y1long - loffset);
int x1u = (int) MIN(output->size[2], x1long + uoffset + 1);
int y1u = (int) MIN(output->size[1], y1long + uoffset + 1);
int x2l = (int) MAX(0, x2long - loffset);
int y2l = (int) MAX(0, y2long - loffset);
int x2u = (int) MIN(output->size[2], x2long + uoffset + 1);
int y2u = (int) MIN(output->size[1], y2long + uoffset + 1);
for (int y = y1l; y < y2u; y++) {
for (int x = x1l; x < x1u; x++) {
image_(drawPixel)(output, y, x, cr, cg, cb);
}
for (int x = x2l; x < x2u; x++) {
image_(drawPixel)(output, y, x, cr, cg, cb);
}
}
for (int x = x1l; x < x2u; x++) {
for (int y = y1l; y < y1u; y++) {
image_(drawPixel)(output, y, x, cr, cg, cb);
}
for (int y = y2l; y < y2u; y++) {
image_(drawPixel)(output, y, x, cr, cg, cb);
}
}
return 0;
}
static const struct luaL_Reg image_(Main__) [] = {
{"scaleSimple", image_(Main_scaleSimple)},
{"scaleBilinear", image_(Main_scaleBilinear)},
{"scaleBicubic", image_(Main_scaleBicubic)},
{"rotate", image_(Main_rotate)},
{"rotateBilinear", image_(Main_rotateBilinear)},
{"polar", image_(Main_polar)},
{"polarBilinear", image_(Main_polarBilinear)},
{"logPolar", image_(Main_logPolar)},
{"logPolarBilinear", image_(Main_logPolarBilinear)},
{"translate", image_(Main_translate)},
{"cropNoScale", image_(Main_cropNoScale)},
{"warp", image_(Main_warp)},
{"saturate", image_(Main_saturate)},
{"rgb2y", image_(Main_rgb2y)},
{"rgb2hsv", image_(Main_rgb2hsv)},
{"rgb2hsl", image_(Main_rgb2hsl)},
{"hsv2rgb", image_(Main_hsv2rgb)},
{"hsl2rgb", image_(Main_hsl2rgb)},
{"rgb2lab", image_(Main_rgb2lab)},
{"lab2rgb", image_(Main_lab2rgb)},
{"gaussian", image_(Main_gaussian)},
{"vflip", image_(Main_vflip)},
{"hflip", image_(Main_hflip)},
{"flip", image_(Main_flip)},
{"colorize", image_(Main_colorize)},
{"text", image_(Main_drawtext)},
{"drawRect", image_(Main_drawRect)},
{NULL, NULL}
};
void image_(Main_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, image_(Main__), "image");
}
#endif // TH_GENERIC_FILE
|
nr_direct.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <assert.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "cint_funcs.h"
#include "vhf/nr_direct.h"
#define MIN(I,J) ((I) < (J) ? (I) : (J))
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
static int _max_cache_size(int (*intor)(), int *shls_slice, int *images_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
int i, n;
int i0 = shls_slice[0];
int i1 = shls_slice[1];
int shls[4];
int cache_size = 0;
for (i = i0; i < i1; i++) {
shls[0] = images_loc[i];
shls[1] = images_loc[i];
shls[2] = images_loc[i];
shls[3] = images_loc[i];
n = (*intor)(NULL, NULL, shls, atm, natm, bas, nbas, env, NULL, NULL);
cache_size = MAX(cache_size, n);
}
return cache_size;
}
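/* Accumulate the two-electron integrals (ij|kl) for one quartet of bvk-cell shells by
 * summing over the lattice images of shells j, k and l listed in images_loc (the i shell
 * stays at its reference image). Each image contribution is Schwarz-screened with the
 * q_cond tables before int2e_sph is called. Returns 0 when no non-zero contribution
 * survives the screening. */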
static int _assemble_eris(double *eri_buf, int *images_loc,
int ishell, int jshell, int kshell, int lshell,
double cutoff, CVHFOpt *vhfopt, IntorEnvs *envs)
{
int *atm = envs->atm;
int *bas = envs->bas;
double *env = envs->env;
int natm = envs->natm;
int nbas = envs->nbas;
CINTOpt *cintopt = envs->cintopt;
const size_t Nbas = nbas;
const int *ao_loc = envs->ao_loc;
const int ish0 = images_loc[ishell];
const int jsh0 = images_loc[jshell];
const int ksh0 = images_loc[kshell];
const int lsh0 = images_loc[lshell];
const int jsh1 = images_loc[jshell+1];
const int ksh1 = images_loc[kshell+1];
const int lsh1 = images_loc[lshell+1];
const int i0 = ao_loc[ishell];
const int j0 = ao_loc[jshell];
const int k0 = ao_loc[kshell];
const int l0 = ao_loc[lshell];
const int i1 = ao_loc[ishell+1];
const int j1 = ao_loc[jshell+1];
const int k1 = ao_loc[kshell+1];
const int l1 = ao_loc[lshell+1];
const int di = i1 - i0;
const int dj = j1 - j0;
const int dk = k1 - k0;
const int dl = l1 - l0;
const int dijkl = di * dj * dk * dl;
double *q_cond_ijij = vhfopt->q_cond;
double *q_cond_iijj = vhfopt->q_cond + Nbas*Nbas;
double *q_cond_ij, *q_cond_kl, *q_cond_ik, *q_cond_jk;
double *eri = eri_buf;
double *bufL = eri_buf + dijkl;
double *cache = bufL + dijkl;
int shls[4] = {ish0};
int n, jsh, ksh, lsh;
double kl_cutoff, jl_cutoff, il_cutoff;
int empty = 1;
for (n = 0; n < dijkl; n++) {
eri[n] = 0;
}
q_cond_ij = q_cond_ijij + ish0 * Nbas;
q_cond_ik = q_cond_iijj + ish0 * Nbas;
for (jsh = jsh0; jsh < jsh1; jsh++) {
if (q_cond_ij[jsh] < cutoff) {
continue;
}
kl_cutoff = cutoff / q_cond_ij[jsh];
q_cond_jk = q_cond_iijj + jsh * Nbas;
for (ksh = ksh0; ksh < ksh1; ksh++) {
if (q_cond_ik[ksh] < cutoff ||
q_cond_jk[ksh] < cutoff) {
continue;
}
q_cond_kl = q_cond_ijij + ksh * Nbas;
jl_cutoff = cutoff / q_cond_ik[ksh];
il_cutoff = cutoff / q_cond_jk[ksh];
for (lsh = lsh0; lsh < lsh1; lsh++) {
if (q_cond_kl[lsh] < kl_cutoff ||
q_cond_jk[lsh] < jl_cutoff ||
q_cond_ik[lsh] < il_cutoff) {
continue;
}
shls[1] = jsh;
shls[2] = ksh;
shls[3] = lsh;
if (int2e_sph(bufL, NULL, shls, atm, natm,
bas, nbas, env, cintopt, cache)) {
for (n = 0; n < dijkl; n++) {
eri[n] += bufL[n];
}
empty = 0;
}
}
}
}
return !empty;
}
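/* Exchange contribution without k/l symmetry: vk[i,l] += (ij|kl) * dm[j,k], with the
 * density block selected through dm_translation. The quartet is skipped when the
 * dm_cond screening estimate falls below the cutoff. */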
void PBCVHF_contract_k_s1(double *vk, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int cell_j = bvk_cell_id[jsh];
const int cell_k = bvk_cell_id[ksh];
const int cell_l = bvk_cell_id[lsh];
const int jshp = cell0_shl_id[jsh];
const int kshp = cell0_shl_id[ksh];
const int lshp = cell0_shl_id[lsh];
const int dm_jk_off = dm_translation[cell_j * nkpts + cell_k];
const int nn0 = nbasp * nbasp;
double direct_scf_cutoff = vhfopt->direct_scf_cutoff;
double dm_jk_cond = vhfopt->dm_cond[dm_jk_off*nn0 + jshp*nbasp+kshp];
if (dm_jk_cond < direct_scf_cutoff) {
return;
} else {
direct_scf_cutoff /= dm_jk_cond;
}
if (!_assemble_eris(buf, images_loc, ish, jsh, ksh, lsh,
direct_scf_cutoff, vhfopt, envs)) {
return;
}
const int *ao_loc = envs->ao_loc;
const int naop = ao_loc[nbasp];
const int nn = naop * naop;
const int bn = naop * nbands;
const int knn = nn * nkpts;
const int bnn = bn * naop;
const int i0 = ao_loc[ish];
const int jp0 = ao_loc[jshp];
const int kp0 = ao_loc[kshp];
const int lp0 = ao_loc[lshp];
const int i1 = ao_loc[ish+1];
const int jp1 = ao_loc[jshp+1];
const int kp1 = ao_loc[kshp+1];
const int lp1 = ao_loc[lshp+1];
int idm, i, jp, kp, lp, n;
double sjk, qijkl;
double *dm_jk;
vk += cell_l * naop;
for (idm = 0; idm < n_dm; idm++) {
dm_jk = dms + dm_jk_off * nn + idm * knn;
n = 0;
for (lp = lp0; lp < lp1; lp++) {
for (kp = kp0; kp < kp1; kp++) {
for (jp = jp0; jp < jp1; jp++) {
sjk = dm_jk[jp*naop+kp];
for (i = i0; i < i1; i++, n++) {
qijkl = buf[n];
vk[i*bn+lp] += qijkl * sjk;
} } }
}
vk += bnn;
}
}
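/* Exchange contribution for ksh > lsh: both permutations of the (k,l) pair are added,
 * vk[i,l] += (ij|kl) * dm[j,k] and vk[i,k] += (ij|kl) * dm[j,l]. */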
static void contract_k_s2_kgtl(double *vk, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int cell_j = bvk_cell_id[jsh];
const int cell_k = bvk_cell_id[ksh];
const int cell_l = bvk_cell_id[lsh];
const int jshp = cell0_shl_id[jsh];
const int kshp = cell0_shl_id[ksh];
const int lshp = cell0_shl_id[lsh];
const int dm_jk_off = dm_translation[cell_j*nkpts+cell_k];
const int dm_jl_off = dm_translation[cell_j*nkpts+cell_l];
const int nn0 = nbasp * nbasp;
double direct_scf_cutoff = vhfopt->direct_scf_cutoff;
double dm_jk_cond = vhfopt->dm_cond[dm_jk_off*nn0 + jshp*nbasp+kshp];
double dm_jl_cond = vhfopt->dm_cond[dm_jl_off*nn0 + jshp*nbasp+lshp];
double dm_cond_max = MAX(dm_jk_cond, dm_jl_cond);
if (dm_cond_max < direct_scf_cutoff) {
return;
} else {
direct_scf_cutoff /= dm_cond_max;
}
if (!_assemble_eris(buf, images_loc, ish, jsh, ksh, lsh,
direct_scf_cutoff, vhfopt, envs)) {
return;
}
const int *ao_loc = envs->ao_loc;
const int naop = ao_loc[nbasp];
const int nn = naop * naop;
const int bn = naop * nbands;
const int knn = nn * nkpts;
const int bnn = bn * naop;
const int i0 = ao_loc[ish];
const int jp0 = ao_loc[jshp];
const int kp0 = ao_loc[kshp];
const int lp0 = ao_loc[lshp];
const int i1 = ao_loc[ish+1];
const int jp1 = ao_loc[jshp+1];
const int kp1 = ao_loc[kshp+1];
const int lp1 = ao_loc[lshp+1];
int idm, i, jp, kp, lp, n;
double sjk, sjl, qijkl;
double *dm_jk, *dm_jl;
double *vk_ik = vk + cell_k * naop;
double *vk_il = vk + cell_l * naop;
for (idm = 0; idm < n_dm; idm++) {
dm_jk = dms + dm_jk_off * nn + idm * knn;
dm_jl = dms + dm_jl_off * nn + idm * knn;
n = 0;
for (lp = lp0; lp < lp1; lp++) {
for (kp = kp0; kp < kp1; kp++) {
for (jp = jp0; jp < jp1; jp++) {
sjk = dm_jk[jp*naop+kp];
sjl = dm_jl[jp*naop+lp];
for (i = i0; i < i1; i++, n++) {
qijkl = buf[n];
vk_il[i*bn+lp] += qijkl * sjk;
vk_ik[i*bn+kp] += qijkl * sjl;
} } }
}
vk_ik += bnn;
vk_il += bnn;
}
}
void PBCVHF_contract_k_s2kl(double *vk, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ksh > lsh) {
contract_k_s2_kgtl(vk, dms, buf, n_dm, nkpts, nbands, nbasp,
ish, jsh, ksh, lsh, bvk_cell_id,
cell0_shl_id, images_loc,
dm_translation, vhfopt, envs);
} else if (ksh == lsh) {
PBCVHF_contract_k_s1(vk, dms, buf, n_dm, nkpts, nbands, nbasp,
ish, jsh, ksh, lsh, bvk_cell_id,
cell0_shl_id, images_loc,
dm_translation, vhfopt, envs);
}
}
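/* Coulomb contribution without k/l symmetry: vj[i,j] += (ij|kl) * dm[l,k]. */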
void PBCVHF_contract_j_s1(double *vj, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int cell_j = bvk_cell_id[jsh];
const int cell_k = bvk_cell_id[ksh];
const int cell_l = bvk_cell_id[lsh];
const int jshp = cell0_shl_id[jsh];
const int kshp = cell0_shl_id[ksh];
const int lshp = cell0_shl_id[lsh];
const int dm_lk_off = dm_translation[cell_l * nkpts + cell_k];
const int nn0 = nbasp * nbasp;
double direct_scf_cutoff = vhfopt->direct_scf_cutoff;
double dm_lk_cond = vhfopt->dm_cond[dm_lk_off*nn0 + lshp*nbasp+kshp];
if (dm_lk_cond < direct_scf_cutoff) {
return;
} else {
direct_scf_cutoff /= dm_lk_cond;
}
if (!_assemble_eris(buf, images_loc, ish, jsh, ksh, lsh,
direct_scf_cutoff, vhfopt, envs)) {
return;
}
const int *ao_loc = envs->ao_loc;
const int naop = ao_loc[nbasp];
const int nn = naop * naop;
const int bn = naop * nbands;
const int knn = nn * nkpts;
const int bnn = bn * naop;
const int i0 = ao_loc[ish];
const int jp0 = ao_loc[jshp];
const int kp0 = ao_loc[kshp];
const int lp0 = ao_loc[lshp];
const int i1 = ao_loc[ish+1];
const int jp1 = ao_loc[jshp+1];
const int kp1 = ao_loc[kshp+1];
const int lp1 = ao_loc[lshp+1];
int idm, i, jp, kp, lp, n;
double slk, qijkl;
double *dm_lk;
vj += cell_j * naop;
for (idm = 0; idm < n_dm; idm++) {
dm_lk = dms + dm_lk_off * nn + idm * knn;
n = 0;
for (lp = lp0; lp < lp1; lp++) {
for (kp = kp0; kp < kp1; kp++) {
slk = dm_lk[lp*naop+kp];
for (jp = jp0; jp < jp1; jp++) {
for (i = i0; i < i1; i++, n++) {
qijkl = buf[n];
vj[i*bn+jp] += qijkl * slk;
} }
} }
vj += bnn;
}
}
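/* Coulomb contribution for ksh > lsh: the density from both (l,k) and (k,l) is folded
 * into slk, so vj[i,j] += (ij|kl) * (dm[l,k] + dm[k,l]). */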
static void contract_j_s2_kgtl(double *vj, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int cell_j = bvk_cell_id[jsh];
const int cell_k = bvk_cell_id[ksh];
const int cell_l = bvk_cell_id[lsh];
const int jshp = cell0_shl_id[jsh];
const int kshp = cell0_shl_id[ksh];
const int lshp = cell0_shl_id[lsh];
const int dm_lk_off = dm_translation[cell_l * nkpts + cell_k];
const int dm_kl_off = dm_translation[cell_k * nkpts + cell_l];
const int nn0 = nbasp * nbasp;
double direct_scf_cutoff = vhfopt->direct_scf_cutoff;
double dm_lk_cond = vhfopt->dm_cond[dm_lk_off*nn0 + lshp*nbasp+kshp];
double dm_kl_cond = vhfopt->dm_cond[dm_kl_off*nn0 + kshp*nbasp+lshp];
double dm_cond_max = dm_lk_cond + dm_kl_cond;
if (dm_cond_max < direct_scf_cutoff) {
return;
} else {
direct_scf_cutoff /= dm_cond_max;
}
if (!_assemble_eris(buf, images_loc, ish, jsh, ksh, lsh,
direct_scf_cutoff, vhfopt, envs)) {
return;
}
const int *ao_loc = envs->ao_loc;
const int naop = ao_loc[nbasp];
const int nn = naop * naop;
const int bn = naop * nbands;
const int knn = nn * nkpts;
const int bnn = bn * naop;
const int i0 = ao_loc[ish];
const int jp0 = ao_loc[jshp];
const int kp0 = ao_loc[kshp];
const int lp0 = ao_loc[lshp];
const int i1 = ao_loc[ish+1];
const int jp1 = ao_loc[jshp+1];
const int kp1 = ao_loc[kshp+1];
const int lp1 = ao_loc[lshp+1];
int idm, i, jp, kp, lp, n;
double slk, qijkl;
double *dm_lk, *dm_kl;
vj += cell_j * naop;
for (idm = 0; idm < n_dm; idm++) {
dm_lk = dms + dm_lk_off * nn + idm * knn;
dm_kl = dms + dm_kl_off * nn + idm * knn;
n = 0;
for (lp = lp0; lp < lp1; lp++) {
for (kp = kp0; kp < kp1; kp++) {
slk = dm_lk[lp*naop+kp] + dm_kl[kp*naop+lp];
for (jp = jp0; jp < jp1; jp++) {
for (i = i0; i < i1; i++, n++) {
qijkl = buf[n];
vj[i*bn+jp] += qijkl * slk;
} }
} }
vj += bnn;
}
}
void PBCVHF_contract_j_s2kl(double *vj, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ksh > lsh) {
contract_j_s2_kgtl(vj, dms, buf, n_dm, nkpts, nbands, nbasp,
ish, jsh, ksh, lsh, bvk_cell_id,
cell0_shl_id, images_loc,
dm_translation, vhfopt, envs);
} else if (ksh == lsh) {
PBCVHF_contract_j_s1(vj, dms, buf, n_dm, nkpts, nbands, nbasp,
ish, jsh, ksh, lsh, bvk_cell_id,
cell0_shl_id, images_loc,
dm_translation, vhfopt, envs);
}
}
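/* Build both the Coulomb and exchange matrices in a single pass over the assembled ERIs.
 * The n_dm vj matrices come first in the jk output buffer, followed by the n_dm vk matrices. */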
void PBCVHF_contract_jk_s1(double *jk, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int cell_j = bvk_cell_id[jsh];
const int cell_k = bvk_cell_id[ksh];
const int cell_l = bvk_cell_id[lsh];
const int jshp = cell0_shl_id[jsh];
const int kshp = cell0_shl_id[ksh];
const int lshp = cell0_shl_id[lsh];
const int dm_lk_off = dm_translation[cell_l * nkpts + cell_k];
const int dm_jk_off = dm_translation[cell_j * nkpts + cell_k];
const int nn0 = nbasp * nbasp;
double direct_scf_cutoff = vhfopt->direct_scf_cutoff;
double dm_lk_cond = vhfopt->dm_cond[dm_lk_off*nn0 + lshp*nbasp+kshp];
double dm_jk_cond = vhfopt->dm_cond[dm_jk_off*nn0 + jshp*nbasp+kshp];
double dm_cond_max = MAX(dm_lk_cond, dm_jk_cond);
if (dm_cond_max < direct_scf_cutoff) {
return;
} else {
direct_scf_cutoff /= dm_cond_max;
}
if (!_assemble_eris(buf, images_loc, ish, jsh, ksh, lsh,
direct_scf_cutoff, vhfopt, envs)) {
return;
}
const int *ao_loc = envs->ao_loc;
const int naop = ao_loc[nbasp];
const int nn = naop * naop;
const int bn = naop * nbands;
const int knn = nn * nkpts;
const int bnn = bn * naop;
const int i0 = ao_loc[ish];
const int jp0 = ao_loc[jshp];
const int kp0 = ao_loc[kshp];
const int lp0 = ao_loc[lshp];
const int i1 = ao_loc[ish+1];
const int jp1 = ao_loc[jshp+1];
const int kp1 = ao_loc[kshp+1];
const int lp1 = ao_loc[lshp+1];
double *vj = jk + cell_j * naop;
double *vk = jk + n_dm * bnn + cell_l * naop;
int idm, i, jp, kp, lp, n;
double slk, sjk, qijkl;
double *dm_lk, *dm_jk;
for (idm = 0; idm < n_dm; idm++) {
dm_lk = dms + dm_lk_off * nn + idm * knn;
dm_jk = dms + dm_jk_off * nn + idm * knn;
n = 0;
for (lp = lp0; lp < lp1; lp++) {
for (kp = kp0; kp < kp1; kp++) {
slk = dm_lk[lp*naop+kp];
for (jp = jp0; jp < jp1; jp++) {
sjk = dm_jk[jp*naop+kp];
for (i = i0; i < i1; i++, n++) {
qijkl = buf[n];
vj[i*bn+jp] += qijkl * slk;
vk[i*bn+lp] += qijkl * sjk;
}
}
} }
vj += bnn;
vk += bnn;
}
}
static void contract_jk_s2_kgtl(double *jk, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
const int cell_j = bvk_cell_id[jsh];
const int cell_k = bvk_cell_id[ksh];
const int cell_l = bvk_cell_id[lsh];
const int jshp = cell0_shl_id[jsh];
const int kshp = cell0_shl_id[ksh];
const int lshp = cell0_shl_id[lsh];
const int dm_jk_off = dm_translation[cell_j*nkpts+cell_k];
const int dm_jl_off = dm_translation[cell_j*nkpts+cell_l];
const int dm_lk_off = dm_translation[cell_l*nkpts+cell_k];
const int dm_kl_off = dm_translation[cell_k*nkpts+cell_l];
const int nn0 = nbasp * nbasp;
double direct_scf_cutoff = vhfopt->direct_scf_cutoff;
double dm_jk_cond = vhfopt->dm_cond[dm_jk_off*nn0 + jshp*nbasp+kshp];
double dm_jl_cond = vhfopt->dm_cond[dm_jl_off*nn0 + jshp*nbasp+lshp];
double dm_lk_cond = vhfopt->dm_cond[dm_lk_off*nn0 + lshp*nbasp+kshp];
double dm_kl_cond = vhfopt->dm_cond[dm_kl_off*nn0 + kshp*nbasp+lshp];
double dm_cond_max = MAX(dm_jk_cond, dm_jl_cond);
dm_cond_max = MAX(dm_cond_max, dm_lk_cond + dm_kl_cond);
if (dm_cond_max < direct_scf_cutoff) {
return;
} else {
direct_scf_cutoff /= dm_cond_max;
}
if (!_assemble_eris(buf, images_loc, ish, jsh, ksh, lsh,
direct_scf_cutoff, vhfopt, envs)) {
return;
}
const int *ao_loc = envs->ao_loc;
const int naop = ao_loc[nbasp];
const int nn = naop * naop;
const int bn = naop * nbands;
const int knn = nn * nkpts;
const int bnn = bn * naop;
const int i0 = ao_loc[ish];
const int jp0 = ao_loc[jshp];
const int kp0 = ao_loc[kshp];
const int lp0 = ao_loc[lshp];
const int i1 = ao_loc[ish+1];
const int jp1 = ao_loc[jshp+1];
const int kp1 = ao_loc[kshp+1];
const int lp1 = ao_loc[lshp+1];
double *vj = jk + cell_j * naop;
double *vk_ik = jk + n_dm * bnn + cell_k * naop;
double *vk_il = jk + n_dm * bnn + cell_l * naop;
int idm, i, jp, kp, lp, n;
double sjk, sjl, slk, qijkl;
double *dm_jk, *dm_jl, *dm_lk, *dm_kl;
for (idm = 0; idm < n_dm; idm++) {
dm_lk = dms + dm_lk_off * nn + idm * knn;
dm_kl = dms + dm_kl_off * nn + idm * knn;
dm_jk = dms + dm_jk_off * nn + idm * knn;
dm_jl = dms + dm_jl_off * nn + idm * knn;
n = 0;
for (lp = lp0; lp < lp1; lp++) {
for (kp = kp0; kp < kp1; kp++) {
slk = dm_lk[lp*naop+kp] + dm_kl[kp*naop+lp];
for (jp = jp0; jp < jp1; jp++) {
sjk = dm_jk[jp*naop+kp];
sjl = dm_jl[jp*naop+lp];
for (i = i0; i < i1; i++, n++) {
qijkl = buf[n];
vj[i*bn+jp] += qijkl * slk;
vk_il[i*bn+lp] += qijkl * sjk;
vk_ik[i*bn+kp] += qijkl * sjl;
} }
}
}
vj += bnn;
vk_ik += bnn;
vk_il += bnn;
}
}
void PBCVHF_contract_jk_s2kl(double *jk, double *dms, double *buf,
int n_dm, int nkpts, int nbands, int nbasp,
int ish, int jsh, int ksh, int lsh,
int *bvk_cell_id, int *cell0_shl_id,
int *images_loc, int *dm_translation,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ksh > lsh) {
contract_jk_s2_kgtl(jk, dms, buf, n_dm, nkpts, nbands, nbasp,
ish, jsh, ksh, lsh, bvk_cell_id,
cell0_shl_id, images_loc,
dm_translation, vhfopt, envs);
} else if (ksh == lsh) {
PBCVHF_contract_jk_s1(jk, dms, buf, n_dm, nkpts, nbands, nbasp,
ish, jsh, ksh, lsh, bvk_cell_id,
cell0_shl_id, images_loc,
dm_translation, vhfopt, envs);
}
}
/*
* shls_slice refers to the shells of the entire sup-mol.
* bvk_ao_loc holds the ao_loc offsets of the bvk-cell basis shells that appear in the supmol (some shells are removed)
* nbasp is the number of basis shells in the primitive cell
* dm_translation exploits the translation symmetry of the density matrices (w.r.t. the full bvk-cell):
* DM[M,N] = DM[N-M], i.e. the pair of cell indices is mapped to a single translation index
*/
void PBCVHF_direct_drv(void (*fdot)(), double *out, double *dms,
int n_dm, int nkpts, int nbands, int nbasp,
char *ovlp_mask, int *bvk_cell_id,
int *cell0_shl_id, int *images_loc,
int *shls_slice, int *bvk_ao_loc,
int *dm_translation, CINTOpt *cintopt, CVHFOpt *vhfopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
IntorEnvs envs = {natm, nbas, atm, bas, env, shls_slice, bvk_ao_loc,
NULL, cintopt, 1};
const size_t ish0 = shls_slice[0];
const size_t ish1 = shls_slice[1];
const size_t jsh0 = shls_slice[2];
const size_t jsh1 = shls_slice[3];
const size_t ksh0 = shls_slice[4];
const size_t ksh1 = shls_slice[5];
const size_t lsh0 = shls_slice[6];
const size_t lsh1 = shls_slice[7];
const size_t nish = ish1 - ish0;
const size_t njsh = jsh1 - jsh0;
const size_t nksh = ksh1 - ksh0;
const size_t nlsh = lsh1 - lsh0;
const int di = GTOmax_shell_dim(bvk_ao_loc, shls_slice, 1);
const int cache_size = _max_cache_size(int2e_sph, shls_slice, images_loc,
atm, natm, bas, nbas, env);
const size_t nij = nish * njsh;
const int naop = bvk_ao_loc[nbasp];
#pragma omp parallel
{
size_t ij, n;
int i, j, k, l;
size_t size = n_dm * naop * naop * nbands;
if (fdot == &PBCVHF_contract_jk_s2kl || fdot == &PBCVHF_contract_jk_s1) {
size *= 2; // vj and vk
}
double *v_priv = calloc(size, sizeof(double));
double *buf = malloc(sizeof(double) * (di*di*di*di*2 + cache_size));
#pragma omp for schedule(dynamic, 1)
for (ij = 0; ij < nij; ij++) {
i = ij / njsh;
j = ij % njsh;
if (!ovlp_mask[i*njsh+j]) {
continue;
}
for (k = 0; k < nksh; k++) {
for (l = 0; l < nlsh; l++) {
if (!ovlp_mask[k*nlsh+l]) {
continue;
}
(*fdot)(v_priv, dms, buf, n_dm, nkpts, nbands, nbasp,
i, j, k, l, bvk_cell_id, cell0_shl_id, images_loc,
dm_translation, vhfopt, &envs);
} }
}
#pragma omp critical
{
for (n = 0; n < size; n++) {
out[n] += v_priv[n];
}
}
free(buf);
free(v_priv);
}
}
/************************************************/
void CVHFset_int2e_q_cond(int (*intor)(), CINTOpt *cintopt, double *q_cond,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env);
static int _int2e_swap_jk(double *buf, int *dims, int *shls,
int *atm, int natm, int *bas, int nbas, double *env,
CINTOpt *cintopt, double *cache)
{
int shls_swap_jk[4] = {shls[0], shls[2], shls[1], shls[3]};
return int2e_sph(buf, dims, shls_swap_jk, atm, natm, bas, nbas, env, cintopt, cache);
}
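/* Precompute the Schwarz-type screening tables: the first Nbas*Nbas block of q_cond holds
 * (ij|ij)-based estimates, the second block holds (ii|jj)-based estimates obtained with the
 * shell-swapped integral driver above. */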
void PBCVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
/* This memory is released in CVHFdel_optimizer; it is not clear
* why valgrind reports a memory leak here */
if (opt->q_cond) {
free(opt->q_cond);
}
// nbas in the input arguments may differ from opt->nbas.
// Use opt->nbas because it is used in the prescreen function
nbas = opt->nbas;
size_t Nbas = nbas;
opt->q_cond = (double *)malloc(sizeof(double) * Nbas * Nbas * 2);
double *qcond_ijij = opt->q_cond;
double *qcond_iijj = qcond_ijij + Nbas * Nbas;
CVHFset_int2e_q_cond(intor, cintopt, qcond_ijij, ao_loc,
atm, natm, bas, nbas, env);
CVHFset_int2e_q_cond(_int2e_swap_jk, cintopt, qcond_iijj, ao_loc,
atm, natm, bas, nbas, env);
}
|
GB_binop__isle_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__isle_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__isle_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint64)
// A*D function (colscale): GB (_AxD__isle_uint64)
// D*A function (rowscale): GB (_DxB__isle_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint64)
// C=scalar+B GB (_bind1st__isle_uint64)
// C=scalar+B' GB (_bind1st_tran__isle_uint64)
// C=A+scalar GB (_bind2nd__isle_uint64)
// C=A'+scalar GB (_bind2nd_tran__isle_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT64 || GxB_NO_ISLE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isle_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__first_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int16)
// A*D function (colscale): GB (_AxD__first_int16)
// D*A function (rowscale): GB (_DxB__first_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT16 || GxB_NO_FIRST_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
omp_device_memory.c | // RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
// REQUIRES: nvptx64-nvidia-cuda
#include <omp.h>
#include <stdio.h>
int main() {
const int N = 64;
int *device_ptr =
omp_alloc(N * sizeof(int), llvm_omp_target_device_mem_alloc);
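// The buffer is allocated in device memory only (assumption based on the
// llvm_omp_target_device_mem_alloc allocator), so the target regions below
// receive it via is_device_ptr() instead of an implicit map clause.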
#pragma omp target teams distribute parallel for is_device_ptr(device_ptr)
for (int i = 0; i < N; ++i) {
device_ptr[i] = 1;
}
int sum = 0;
#pragma omp target reduction(+ : sum) is_device_ptr(device_ptr)
for (int i = 0; i < N; ++i)
sum += device_ptr[i];
// CHECK: PASS
if (sum == N)
printf("PASS\n");
omp_free(device_ptr, llvm_omp_target_device_mem_alloc);
}
|
convolution_1x1_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack4to1_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
// interleave
// src = inch-outch
// dst = 4a-inch/4a-outch
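// i.e. for every block of 8 output channels on aarch64 (4 otherwise), the weights of
// four consecutive input channels are stored back to back, output-channel first, so the
// sgemm kernels below can fetch them with a single ld1/vld load per step.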
#if __aarch64__
kernel_tm_pack4.create(8, inch / 4, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)4u * 4, 4);
#else
kernel_tm_pack4.create(4, inch / 4, outch / 4 + outch % 4, (size_t)4u * 4, 4);
#endif
int p = 0;
#if __aarch64__
for (; p + 7 < outch; p += 8)
{
const float* k0 = (const float*)kernel + (p + 0) * inch;
const float* k1 = (const float*)kernel + (p + 1) * inch;
const float* k2 = (const float*)kernel + (p + 2) * inch;
const float* k3 = (const float*)kernel + (p + 3) * inch;
const float* k4 = (const float*)kernel + (p + 4) * inch;
const float* k5 = (const float*)kernel + (p + 5) * inch;
const float* k6 = (const float*)kernel + (p + 6) * inch;
const float* k7 = (const float*)kernel + (p + 7) * inch;
float* ktmp = kernel_tm_pack4.channel(p / 8);
for (int q = 0; q + 3 < inch; q += 4)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp[4] = k4[0];
ktmp[5] = k5[0];
ktmp[6] = k6[0];
ktmp[7] = k7[0];
ktmp[8] = k0[1];
ktmp[9] = k1[1];
ktmp[10] = k2[1];
ktmp[11] = k3[1];
ktmp[12] = k4[1];
ktmp[13] = k5[1];
ktmp[14] = k6[1];
ktmp[15] = k7[1];
ktmp[16] = k0[2];
ktmp[17] = k1[2];
ktmp[18] = k2[2];
ktmp[19] = k3[2];
ktmp[20] = k4[2];
ktmp[21] = k5[2];
ktmp[22] = k6[2];
ktmp[23] = k7[2];
ktmp[24] = k0[3];
ktmp[25] = k1[3];
ktmp[26] = k2[3];
ktmp[27] = k3[3];
ktmp[28] = k4[3];
ktmp[29] = k5[3];
ktmp[30] = k6[3];
ktmp[31] = k7[3];
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
k4 += 4;
k5 += 4;
k6 += 4;
k7 += 4;
ktmp += 32;
}
}
#endif
for (; p + 3 < outch; p += 4)
{
const float* k0 = (const float*)kernel + (p + 0) * inch;
const float* k1 = (const float*)kernel + (p + 1) * inch;
const float* k2 = (const float*)kernel + (p + 2) * inch;
const float* k3 = (const float*)kernel + (p + 3) * inch;
#if __aarch64__
float* ktmp = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4);
#else
float* ktmp = kernel_tm_pack4.channel(p / 4);
#endif
for (int q = 0; q + 3 < inch; q += 4)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp[4] = k0[1];
ktmp[5] = k1[1];
ktmp[6] = k2[1];
ktmp[7] = k3[1];
ktmp[8] = k0[2];
ktmp[9] = k1[2];
ktmp[10] = k2[2];
ktmp[11] = k3[2];
ktmp[12] = k0[3];
ktmp[13] = k1[3];
ktmp[14] = k2[3];
ktmp[15] = k3[3];
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
ktmp += 16;
}
}
for (; p < outch; p++)
{
const float* k0 = (const float*)kernel + p * inch;
#if __aarch64__
float* ktmp = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
float* ktmp = kernel_tm_pack4.channel(p / 4 + p % 4);
#endif
for (int q = 0; q + 3 < inch; q += 4)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp[2] = k0[2];
ktmp[3] = k0[3];
k0 += 4;
ktmp += 4;
}
}
}
static void conv1x1s1_sgemm_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
#if __aarch64__
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, elemsize, elempack, opt.workspace_allocator);
#else
Mat tmp(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
#endif
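// tmp repacks the input image into column tiles of 12 (aarch64 only), 8, 4 and single
// pixels; the channel index expressions such as i / 12 + (i % 12) / 8 + ... used below
// map a pixel index back onto its tile.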
{
int nn_size = 0;
int remain_size_start = 0;
#if __aarch64__
nn_size = size / 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
// transpose 4x12
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n"
"sub %0, %0, #128 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v19.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19");
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#else // __aarch64__
nn_size = size >> 3;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
float* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
// transpose 4x8
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"sub %0, %0, #64 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d16-d19}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d20-d23}, [%0 :128] \n"
"sub %0, %0, #96 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"vst1.f32 {d22-d23}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
// transpose 4x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128] \n"
"sub %0, %0, #32 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
float* outptr4 = top_blob.channel(p + 4);
float* outptr5 = top_blob.channel(p + 5);
float* outptr6 = top_blob.channel(p + 6);
float* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
float* tmpptr = tmp.channel(i / 12);
const float* kptr = (const float*)kernel.channel(p / 8);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v8.4s, v30.s[0] \n"
"dup v9.4s, v30.s[0] \n"
"dup v10.4s, v30.s[0] \n"
"dup v11.4s, v30.s[1] \n"
"dup v12.4s, v30.s[1] \n"
"dup v13.4s, v30.s[1] \n"
"dup v14.4s, v30.s[2] \n"
"dup v15.4s, v30.s[2] \n"
"dup v16.4s, v30.s[2] \n"
"dup v17.4s, v30.s[3] \n"
"dup v18.4s, v30.s[3] \n"
"dup v19.4s, v30.s[3] \n"
"dup v20.4s, v31.s[0] \n"
"dup v21.4s, v31.s[0] \n"
"dup v22.4s, v31.s[0] \n"
"dup v23.4s, v31.s[1] \n"
"dup v24.4s, v31.s[1] \n"
"dup v25.4s, v31.s[1] \n"
"dup v26.4s, v31.s[2] \n"
"dup v27.4s, v31.s[2] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[3] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
"st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n"
"st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n"
"st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n"
"st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr = (const float*)kernel.channel(p / 8);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v16.4s, v30.s[0] \n"
"dup v17.4s, v30.s[0] \n"
"dup v18.4s, v30.s[1] \n"
"dup v19.4s, v30.s[1] \n"
"dup v20.4s, v30.s[2] \n"
"dup v21.4s, v30.s[2] \n"
"dup v22.4s, v30.s[3] \n"
"dup v23.4s, v30.s[3] \n"
"dup v24.4s, v31.s[0] \n"
"dup v25.4s, v31.s[0] \n"
"dup v26.4s, v31.s[1] \n"
"dup v27.4s, v31.s[1] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[2] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
"st1 {v20.4s, v21.4s}, [%3], #32 \n"
"st1 {v22.4s, v23.4s}, [%4], #32 \n"
"st1 {v24.4s, v25.4s}, [%5], #32 \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
"st1 {v30.4s, v31.4s}, [%8], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr = (const float*)kernel.channel(p / 8);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v22.4s, v23.4s}, [%22] \n"
"dup v16.4s, v22.s[0] \n"
"dup v17.4s, v22.s[1] \n"
"dup v18.4s, v22.s[2] \n"
"dup v19.4s, v22.s[3] \n"
"dup v20.4s, v23.s[0] \n"
"dup v21.4s, v23.s[1] \n"
"dup v22.4s, v23.s[2] \n"
"dup v23.4s, v23.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"st1 {v20.4s}, [%5], #16 \n"
"st1 {v21.4s}, [%6], #16 \n"
"st1 {v22.4s}, [%7], #16 \n"
"st1 {v23.4s}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr = (const float*)kernel.channel(p / 8);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%22] \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4s}, [%9], #16 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"st1 {v16.s}[0], [%1], #4 \n"
"st1 {v16.s}[1], [%2], #4 \n"
"st1 {v16.s}[2], [%3], #4 \n"
"st1 {v16.s}[3], [%4], #4 \n"
"st1 {v17.s}[0], [%5], #4 \n"
"st1 {v17.s}[1], [%6], #4 \n"
"st1 {v17.s}[2], [%7], #4 \n"
"st1 {v17.s}[3], [%8], #4 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
float* outptr2 = top_blob.channel(p + 2);
float* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
float* tmpptr = tmp.channel(i / 12);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v19.4s}, [%14] \n"
"dup v8.4s, v19.s[0] \n"
"dup v9.4s, v19.s[0] \n"
"dup v10.4s, v19.s[0] \n"
"dup v11.4s, v19.s[1] \n"
"dup v12.4s, v19.s[1] \n"
"dup v13.4s, v19.s[1] \n"
"dup v14.4s, v19.s[2] \n"
"dup v15.4s, v19.s[2] \n"
"dup v16.4s, v19.s[2] \n"
"dup v17.4s, v19.s[3] \n"
"dup v18.4s, v19.s[3] \n"
"dup v19.4s, v19.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 8);
const float* kptr = (const float*)kernel.channel(p / 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v15.4s}, [%14] \n"
"dup v8.4s, v15.s[0] \n"
"dup v9.4s, v15.s[0] \n"
"dup v10.4s, v15.s[1] \n"
"dup v11.4s, v15.s[1] \n"
"dup v12.4s, v15.s[2] \n"
"dup v13.4s, v15.s[2] \n"
"dup v14.4s, v15.s[3] \n"
"dup v15.4s, v15.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"vld1.f32 {d30-d31}, [%14] \n"
"vdup.f32 q8, d30[0] \n"
"vdup.f32 q9, d30[0] \n"
"vdup.f32 q10, d30[1] \n"
"vdup.f32 q11, d30[1] \n"
"vdup.f32 q12, d31[0] \n"
"vdup.f32 q13, d31[0] \n"
"vdup.f32 q14, d31[1] \n"
"vdup.f32 q15, d31[1] \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"vst1.f32 {d24-d27}, [%3 :128]! \n"
"vst1.f32 {d28-d31}, [%4 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = (const float*)kernel.channel(p / 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v11.4s}, [%14] \n"
"dup v8.4s, v11.s[0] \n"
"dup v9.4s, v11.s[1] \n"
"dup v10.4s, v11.s[2] \n"
"dup v11.4s, v11.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%2], #16 \n"
"st1 {v10.4s}, [%3], #16 \n"
"st1 {v11.4s}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vld1.f32 {d22-d23}, [%14] \n"
"vdup.f32 q8, d22[0] \n"
"vdup.f32 q9, d22[1] \n"
"vdup.f32 q10, d23[0] \n"
"vdup.f32 q11, d23[1] \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%2 :128]! \n"
"vst1.f32 {d20-d21}, [%3 :128]! \n"
"vst1.f32 {d22-d23}, [%4 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = (const float*)kernel.channel(p / 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v8.4s}, [%14] \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.s}[0], [%1], #4 \n"
"st1 {v8.s}[1], [%2], #4 \n"
"st1 {v8.s}[2], [%3], #4 \n"
"st1 {v8.s}[3], [%4], #4 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vld1.f32 {d16-d17}, [%14] \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16[0]}, [%1]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vst1.f32 {d17[0]}, [%3]! \n"
"vst1.f32 {d17[1]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
float* tmpptr = tmp.channel(i / 12);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch; // inch always > 0
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"dup v10.4s, %w8 \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
float* tmpptr = tmp.channel(i / 8);
const float* kptr = (const float*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"vdup.f32 q9, %8 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d24-d31} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = (const float*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr = (const float*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = (const float*)kernel.channel(p / 4 + p % 4);
#endif
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q = 0; q < inch; q++)
{
float32x4_t _r0 = vld1q_f32(tmpptr);
float32x4_t _k0 = vld1q_f32(kptr);
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
tmpptr += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
outptr0[0] = bias0 + sum0;
outptr0++;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
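// Stride-2 1x1 convolution: first subsample the input (keep every other pixel in both
// dimensions, elempack 4), then reuse the stride-1 sgemm path on the shrunk blob.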
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float32x4_t _v = vld1q_f32(r0);
vst1q_f32(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to1_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
RayleighMixtureSummedAreaTableCFAR.h | #pragma once
#include <iostream>
#include <omp.h> // omp_set_nested() is called below; included directly rather than relying on transitive includes
#include <opencv2/opencv.hpp>
#include "TileManager.h"
#include "SummedAreaTableTargetDetector.h"
#include "TargetDetectorConsoleLogger.h"
#include "targetDetectors/AbstractCFAR.h"
using namespace std;
using namespace cv;
class RayleighMixtureSummedAreaTableCFAR : public AbstractCFAR {
public:
RayleighMixtureSummedAreaTableCFAR()
{
}
virtual AbstractCFAR* clone()
{
return new RayleighMixtureSummedAreaTableCFAR;
}
virtual Mat execute(Mat image, double probabilityOfFalseAlarm, map<string, double>& parameters)
{
const int guardRadius = (int)getParameterValue(parameters, "RmSAT-CFAR.guardRadius", 5);
const int clutterRadius = (int)getParameterValue(parameters, "RmSAT-CFAR.clutterRadius", 5);
const int minimumMixtureCount = (int)getParameterValue(parameters, "RmSAT-CFAR.minimumMixtureCount", 1);
const int maximumMixtureCount = (int)getParameterValue(parameters, "RmSAT-CFAR.maximumMixtureCount", 5);
// fit histogram into mixture of Rayleighs
const int tileSize = 1024;
const int bandSize = getBandWidth(parameters);
TileManager tileManager(image, tileSize, bandSize, CV_8UC1);
vector<pair<int, int>> tileIndices = tileManager.getTileIndices();
const int threadCount = min(getThreadCount(), (int)tileIndices.size());
omp_set_nested(1);
Mat globalHistogram = createHistogram(image, tileSize, threadCount);
TargetDetectorConsoleLogger targetDetectorConsoleLogger;
int i;
Rect workingRect;
Mat inputTile;
Mat targetTile;
pair<int, int> tileIndex;
SummedAreaTableTargetDetector* targetDetector = NULL;
#pragma omp parallel private(targetDetector) num_threads(threadCount)
{
targetDetector = new SummedAreaTableTargetDetector(minimumMixtureCount, maximumMixtureCount, guardRadius, clutterRadius);
// set logger
///targetDetector->setLogger(&targetDetectorConsoleLogger);
#pragma omp for private(i, tileIndex, inputTile, targetTile, workingRect) schedule(dynamic, 1)
for (i = 0; i<tileIndices.size(); i++) {
tileIndex = tileIndices.at(i);
workingRect = tileManager.getTileWorkingRectangle(tileIndex);
inputTile = createRayleighCompliantTile(tileManager.getInputTile(tileIndex));
if (targetTile.cols != inputTile.cols || targetTile.rows != inputTile.rows) {
targetTile = Mat(inputTile.rows, inputTile.cols, CV_8UC1);
}
targetDetector->execute(inputTile, targetTile, globalHistogram, probabilityOfFalseAlarm, workingRect);
tileManager.setResultTile(tileIndex, targetTile);
}
delete targetDetector;
}
Mat targetMap = tileManager.getResultImage();
return targetMap;
}
virtual int getClutterArea(map<string, double>& parameters)
{
const int guardRadius = (int)parameters["RmSAT-CFAR.guardRadius"];
const int clutterRadius = (int)parameters["RmSAT-CFAR.clutterRadius"];
const int windowRadius = (guardRadius + clutterRadius);
const int clutterArea = sqr(2 * windowRadius + 1) - sqr(2 * guardRadius + 1);
return clutterArea;
}
virtual int getBandWidth(map<string, double>& parameters)
{
const int guardRadius = (int)parameters["RmSAT-CFAR.guardRadius"];
const int clutterRadius = (int)parameters["RmSAT-CFAR.clutterRadius"];
const int windowRadius = (guardRadius + clutterRadius);
return calculateBandSize(windowRadius);
}
virtual bool isDeterministic() const { return false; }
virtual bool requiresGlobalHistogram() const { return true; }
private:
static Mat createRayleighCompliantTile(Mat& tile)
{
Mat refinedTile(tile.rows, tile.cols, tile.type());
switch (tile.type())
{
case CV_8U: createRayleighCompliantTile<unsigned char>(tile, refinedTile); break;
case CV_8S: createRayleighCompliantTile<char>(tile, refinedTile); break;
case CV_16U: createRayleighCompliantTile<unsigned short>(tile, refinedTile); break;
case CV_16S: createRayleighCompliantTile<short>(tile, refinedTile); break;
case CV_32S: createRayleighCompliantTile<int>(tile, refinedTile); break;
default: refinedTile = tile.clone();
}
return refinedTile;
}
template<typename T>
static void createRayleighCompliantTile(Mat& tile, Mat& refinedTile)
{
Mat tileHistogram = ImageUtilities::createHistogram(tile);
const double backgroundStartPercentile = 0.005;
const double backgroundStart = ImageUtilities::getPercentileIndex<int>(tileHistogram, backgroundStartPercentile);
for (int y = 0; y < tile.rows; y++) {
T* irow = (T*)(tile.data + y * tile.step);
T* rirow = (T*)(refinedTile.data + y * refinedTile.step);
for (int x = 0; x < tile.cols; x++) {
if (irow[x] - backgroundStart > 1)
rirow[x] = (T)(max(irow[x] - backgroundStart, 0.0) + 1.0);
else
rirow[x] = 0;
}
}
}
static Mat createHistogram(Mat& image, int tileSize, int simultaneouslyExecutedTile)
{
const int gridXcount = (image.cols + tileSize - 1) / tileSize;
const int gridYcount = (image.rows + tileSize - 1) / tileSize;
Mat histogram;
Mat tile;
Mat privateHistogram;
int* hptr;
int* phptr;
int x, y, x1, y1, x2, y2, i;
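// Each thread accumulates the histograms of its tiles into its own privateHistogram;
// the per-thread results are then merged bin by bin inside the critical section below.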
#pragma omp parallel private(privateHistogram)
{
#pragma omp for private(tile, x, y, x1, y1, x2, y2, i, hptr, phptr)
for (y = 0; y < gridYcount; y++) {
y1 = y * tileSize;
y2 = min(y1 + tileSize, image.rows);
for (x = 0; x < gridXcount; x++) {
x1 = x * tileSize;
x2 = min(x1 + tileSize, image.cols);
tile = image(Range(y1, y2), Range(x1, x2));
ImageUtilities::addToHistogram(tile, privateHistogram);
}
}
#pragma omp critical
{
if (histogram.empty())
histogram = privateHistogram.clone();
else {
if (!privateHistogram.empty()) {
hptr = (int*)histogram.data;
phptr = (int*)privateHistogram.data;
for (i = 0; i < histogram.cols; i++) {
hptr[i] += phptr[i];
}
}
}
}
}
return histogram;
}
};
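// Usage sketch (illustrative only; parameter keys taken from the getParameterValue
// calls above, the input is assumed to be a single-channel amplitude image):
// RayleighMixtureSummedAreaTableCFAR cfar;
// map<string, double> params;
// params["RmSAT-CFAR.guardRadius"] = 5;
// params["RmSAT-CFAR.clutterRadius"] = 5;
// Mat targetMap = cfar.execute(image, 1e-6, params);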
|
guide.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
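// Guide weight: blends a constant 1 (where the flow is invalid, v == 0) with a Gaussian
// bump of height k and width c centred on the forward-warped position (interpretation
// inferred from the call site in build_guide below).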
float gaussian(float x, float y, float v, float k, float c)
{
return 1-v + v*k*exp(-(x*x+y*y)/(2*c*c));
}
//unsigned char *forward_warping(const void *src, const void *idx, const void *idy, const void *z, int h, int w)
void build_guide(const void *flow_x, const void *flow_y, const void *flow_valid, void *result, const int b, const int h, const int w, const float k, const float c)
{
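// result has shape b x (h*w) x (h*w): for every source pixel (y0, x0) it holds a row of
// guide weights over all target pixels (y1, x1), indexed exactly as in the loops below.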
float *forward_x = (float *)calloc(b * h * w, sizeof(float));
float *forward_y = (float *)calloc(b * h * w, sizeof(float));
for (int z = 0; z < b; z++)
#pragma omp parallel for collapse(2)
for (int y0 = 0; y0 < h; y0++)
for (int x0 = 0; x0 < w; x0++)
{
if ( ((float*)flow_valid)[z*w*h + y0*w + x0] != 0)
{
forward_x[z*w*h + y0*w + x0] = ((float*)flow_x)[z*w*h + y0*w + x0] + x0;
forward_y[z*w*h + y0*w + x0] = ((float*)flow_y)[z*w*h + y0*w + x0] + y0;
}
for (int y1 = 0; y1 < h; y1++)
for (int x1 = 0; x1 < w; x1++)
{
// Accessing cell y=y0*w+x0 , x=y1*w+x1
// y term multiplied by row length (h*w)
((float*)result)[z*w*h*w*h + (y0*w+x0)*w*h + y1*w+x1] = gaussian(x1-forward_x[z*w*h + y0*w + x0], y1-forward_y[z*w*h + y0*w + x0], ((float*)flow_valid)[z*w*h + y0*w + x0], k, c);
}
}
free(forward_x);
free(forward_y);
return;
}
|
Dijkstra.c | // OpenMP example program: Dijkstra shortest-path finder in a
// bidirectional graph
// serves as a tutorial to OpenMP; see notes in comments at the end of
// the file
// each thread handles one chunk of vertices
// usage: dijkstra
#include <stdio.h>
#include <omp.h> // omp_get_thread_num(), omp_get_num_threads()
#define LARGEINT (1 << 30) // "infinity": large sentinel, kept small enough that mind[mv]+ohd[mv][i] cannot overflow
#define NV 6
// global variables, all shared by all threads by default
int ohd[NV][NV], // 1-hop distances between vertices
mind[NV], // min distances found so far
notdone[NV], // vertices not checked yet
nth, // number of threads
chunk, // number of vertices handled by each thread
md, // current min over all threads
mv; // vertex which achieves that min
void init(int ac, char **av)
{ int i,j;
for (i = 0; i < NV; i++)
for (j = 0; j < NV; j++) {
if (j == i) ohd[i][i] = 0;
else ohd[i][j] = LARGEINT;
}
ohd[0][1] = ohd[1][0] = 40;
ohd[0][2] = ohd[2][0] = 15;
ohd[1][2] = ohd[2][1] = 20;
ohd[1][3] = ohd[3][1] = 10;
ohd[1][4] = ohd[4][1] = 25;
ohd[2][3] = ohd[3][2] = 100;
ohd[1][5] = ohd[5][1] = 6;
ohd[4][5] = ohd[5][4] = 8;
for (i = 1; i < NV; i++) {
notdone[i] = 1;
mind[i] = ohd[0][i];
}
}
// finds closest to 0 among notdone, among s through e
void findmymin(int s, int e, int *d, int *v)
{ int i;
*d = LARGEINT;
for (i = s; i <= e; i++)
if (notdone[i] && mind[i] < *d) {
*d = mind[i]; // use the current tentative distance; ohd[0][i] would ignore updates made via updateohd()
*v = i;
}
}
// for each i in [s,e], ask whether a shorter path to i exists, through
// mv
void updateohd(int s, int e)
{ int i;
for (i = s; i <= e; i++)
if (mind[mv] + ohd[mv][i] < mind[i])
mind[i] = mind[mv] + ohd[mv][i];
}
void dowork()
{
#pragma omp parallel // Note 1
{ int startv,endv, // start, end vertices for this thread
step, // whole procedure goes NV steps
mymd, // min value found by this thread
mymv, // vertex which attains that value
me = omp_get_thread_num(); // my thread number
#pragma omp single // Note 2
{ nth = omp_get_num_threads(); chunk = NV/nth;
printf("there are %d threads\n",nth); }
// Note 3
startv = me * chunk;
endv = startv + chunk - 1;
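// note: this simple partition assumes NV is divisible by nth; any leftover
// vertices would otherwise never be assigned to a thread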
for (step = 0; step < NV; step++) {
// find closest vertex to 0 among notdone; each thread finds
// closest in its group, then we find overall closest
#pragma omp single
{ md = LARGEINT; mv = 0; }
findmymin(startv,endv,&mymd,&mymv);
// update overall min if mine is smaller
#pragma omp critical // Note 4
{ if (mymd < md)
{ md = mymd; mv = mymv; }
}
// wait until every thread has contributed to md/mv, then mark new vertex as done
#pragma omp barrier
#pragma omp single
{ notdone[mv] = 0; }
// now update my section of ohd
updateohd(startv,endv);
#pragma omp barrier
}
}
}
int main(int argc, char **argv)
{ int i;
init(argc,argv);
dowork();
// back to single thread now
printf("minimum distances:\n");
for (i = 1; i < NV; i++)
printf("%d\n",mind[i]);
}
// tutorial notes:
// 1. OpenMP works via a preprocessor, which translates pragmas to
// threads calls. Note that the sharp sign ('#') must be the first
// character in the line, other than blanks.
//
// The "parallel" clause says, "Have each thread do this block"
// (enclosed by braces). Code not in a block with a "parallel"
// pragma is done only by the master thread.
// 2. The "single" clause says, "Have only one thread (whichever hits
// this line first) execute the following block."
// In this case, we are calling the OMP function
// omp_get_num_threads(), which of course returns the number of
// threads. Since we assign the return value to the global variable
// nth, only one thread needs to do this, so we use "single". And
// though there would be no harm (other than a delay) if all
// threads did this, in some applications we would need to limit an
// action to just one thread.
// 3. The "barrier" clause does the standard barrier operation. Note
// carefully that there are also implicit barriers following blocks
// to which various OpenMP pragmas apply, such as "for" and
// "single". One can override those implicit barriers by using the
// "nowait" clause. On platforms with nonsequential memory
// consistency, you can also use the "flush" directive to force a
// memory update.
// 4. The "critical" clause sets up a critical section, with invisible
// lock/unlock operations. Note carefully that the clause may be
// followed by an optional name, which is crucial in some
// applications. All critical sections with the same name
// are guarded by the same (invisible) locks. Those with
// no name are also guarded by the same locks, so the programmer
// could really lose parallelism if he/she were not aware of this.
// Certain very specialized one-statement critical sections can be
// handled more simply and efficiently using the "atomic"
// directive, e.g.
// #pragma omp atomic
// y += x;
// Note that that statement can NOT be a block.
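// As an illustration of named critical sections (not used in this program),
// two unrelated updates can be guarded by independent locks:
// #pragma omp critical (minupdate)
// { if (mymd < md) { md = mymd; mv = mymv; } }
// #pragma omp critical (logging)
// { printf("thread %d done\n", me); }
// whereas leaving both unnamed would serialize them against each other.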
|
gt.mapset.c | /*
* PROJECT: GEM-Tools library
* FILE: gt.mapset.c
* DATE: 08/11/2012
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION: Utility to perform set operations {UNION,INTERSECTION,DIFFERENCE} over alignment files {MAP,SAM}
*/
#include <getopt.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "gem_tools.h"
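// Illustrative invocation (an assumed command line; option names are
// inferred from parse_arguments() below, not taken from the GEM-Tools docs):
//   gt.mapset -C intersection --i1 master.map --i2 slave.map -o out.map
// The operation name is matched by gt_filter_parse_operation(), which accepts
// several spellings (e.g. INTERSECTION / Intersection / intersection).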
typedef enum { GT_MAP_SET_UNKNOWN,
GT_MAP_SET_INTERSECTION, GT_MAP_SET_UNION, GT_MAP_SET_DIFFERENCE,
GT_MAP_SET_JOIN, GT_MAP_SET_COMPARE,
GT_MERGE_MAP, GT_DISPLAY_COMPACT_MAP} gt_operation;
typedef struct {
gt_operation operation;
char* name_input_file_1;
char* name_input_file_2;
char* name_output_file;
bool mmap_input;
bool paired_end;
bool files_contain_same_reads;
double eq_threshold;
bool strict;
bool verbose;
uint64_t num_threads;
} gt_stats_args;
gt_stats_args parameters = {
.operation=GT_MAP_SET_UNKNOWN,
.name_input_file_1=NULL,
.name_input_file_2=NULL,
.name_output_file=NULL,
.mmap_input=false,
.paired_end=false,
.files_contain_same_reads=false,
.eq_threshold=0.5,
.strict=false,
.verbose=false,
.num_threads=1
};
uint64_t current_read_length;
int64_t gt_mapset_map_cmp(gt_map* const map_1,gt_map* const map_2) {
const uint64_t eq_threshold = (parameters.eq_threshold <= 1.0) ?
parameters.eq_threshold*current_read_length: parameters.eq_threshold;
return parameters.strict ? gt_map_cmp(map_1,map_2) : gt_map_range_cmp(map_1,map_2,eq_threshold);
}
int64_t gt_mapset_mmap_cmp(gt_map** const map_1,gt_map** const map_2,const uint64_t num_maps) {
const uint64_t eq_threshold = (parameters.eq_threshold <= 1.0) ?
parameters.eq_threshold*current_read_length: parameters.eq_threshold;
return parameters.strict ? gt_mmap_cmp(map_1,map_2,num_maps) : gt_mmap_range_cmp(map_1,map_2,num_maps,eq_threshold);
}
GT_INLINE gt_status gt_mapset_read_template_sync(
gt_buffered_input_file* const buffered_input_master,gt_buffered_input_file* const buffered_input_slave,
gt_buffered_output_file* const buffered_output,gt_template* const template_master,gt_template* const template_slave,
const gt_operation operation) {
// Read master
gt_status error_code_master, error_code_slave;
gt_output_map_attributes* output_attributes = gt_output_map_attributes_new();
gt_generic_parser_attributes* generic_parser_attr = gt_input_generic_parser_attributes_new(parameters.paired_end);
if ((error_code_master=gt_input_generic_parser_get_template(
buffered_input_master,template_master,generic_parser_attr))==GT_IMP_FAIL) {
gt_fatal_error_msg("Fatal error parsing file <<Master>>");
}
// Read slave
if ((error_code_slave=gt_input_generic_parser_get_template(
buffered_input_slave,template_slave,generic_parser_attr))==GT_IMP_FAIL) {
gt_fatal_error_msg("Fatal error parsing file <<Slave>>");
}
// Check EOF conditions
if (error_code_master==GT_IMP_EOF) {
if (error_code_slave!=GT_IMP_EOF) {
gt_fatal_error_msg("<<Slave>> contains more/different reads from <<Master>>");
}
return GT_IMP_EOF;
} else if (error_code_slave==GT_IMP_EOF) { // Slave exhausted. Dump master & return EOF
do {
if (error_code_master==GT_IMP_FAIL) gt_fatal_error_msg("Fatal error parsing file <<Master>>");
if (operation==GT_MAP_SET_UNION || operation==GT_MAP_SET_DIFFERENCE) {
gt_output_map_bofprint_template(buffered_output,template_master,output_attributes);
}
} while ((error_code_master=gt_input_generic_parser_get_template(
buffered_input_master,template_master,generic_parser_attr)));
return GT_IMP_EOF;
}
// Synch loop
while (gt_string_cmp(gt_template_get_string_tag(template_master),
gt_template_get_string_tag(template_slave))) {
// Print non correlative master's template
if (operation==GT_MAP_SET_UNION || operation==GT_MAP_SET_DIFFERENCE) {
gt_output_map_bofprint_template(buffered_output,template_master,output_attributes);
}
// Fetch next master's template
if ((error_code_master=gt_input_generic_parser_get_template(
buffered_input_master,template_master,generic_parser_attr))!=GT_IMP_OK) {
gt_fatal_error_msg("<<Slave>> contains more/different reads from <<Master>>");
}
}
return GT_IMP_OK;
}
GT_INLINE gt_status gt_mapset_read_template_get_commom_map(
gt_buffered_input_file* const buffered_input_master,gt_buffered_input_file* const buffered_input_slave,
gt_template* const template_master,gt_template* const template_slave) {
gt_status error_code_master, error_code_slave;
gt_generic_parser_attributes* generic_parser_attr = gt_input_generic_parser_attributes_new(parameters.paired_end);
// Read master
if ((error_code_master=gt_input_generic_parser_get_template(
buffered_input_master,template_master,generic_parser_attr))==GT_IMP_FAIL) {
gt_fatal_error_msg("Fatal error parsing file <<Master>>");
}
if (error_code_master==GT_IMP_EOF) return GT_IMP_EOF;
// Read slave
if ((error_code_slave=gt_input_generic_parser_get_template(
buffered_input_slave,template_slave,generic_parser_attr))==GT_IMP_FAIL) {
gt_fatal_error_msg("Fatal error parsing file <<Slave>>");
}
if (error_code_slave==GT_IMP_EOF) { // Check EOF conditions
gt_fatal_error_msg("<<Slave>> is not contained in master <<Master>> (looking for '"PRIgts"')",
PRIgts_content(gt_template_get_string_tag(template_master)));
}
// Synch loop
while (gt_string_cmp(gt_template_get_string_tag(template_master),gt_template_get_string_tag(template_slave))) {
// Fetch next slave's template
if ((error_code_master=gt_input_generic_parser_get_template(
buffered_input_slave,template_slave,generic_parser_attr))!=GT_IMP_OK) {
gt_fatal_error_msg("<<Slave>> is not contained in master <<Master>> (looking for '"PRIgts"')",
PRIgts_content(gt_template_get_string_tag(template_master)));
}
}
return GT_IMP_OK;
}
void gt_mapset_perform_set_operations() {
// File IN/OUT
gt_input_file* input_file_1 = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
gt_input_file* input_file_2 = (parameters.name_input_file_2==NULL) ?
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file_2,parameters.mmap_input);
if (parameters.name_input_file_2==NULL) GT_SWAP(input_file_1,input_file_2);
gt_output_file* output_file = (parameters.name_output_file==NULL) ?
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
// Buffered I/O
gt_buffered_input_file* buffered_input_1 = gt_buffered_input_file_new(input_file_1);
gt_buffered_input_file* buffered_input_2 = gt_buffered_input_file_new(input_file_2);
gt_buffered_output_file* buffered_output = gt_buffered_output_file_new(output_file);
gt_buffered_input_file_attach_buffered_output(buffered_input_1,buffered_output);
// Template I/O (synch)
gt_template *template_1 = gt_template_new();
gt_template *template_2 = gt_template_new();
gt_output_map_attributes* output_attributes = gt_output_map_attributes_new();
while (gt_mapset_read_template_sync(buffered_input_1,buffered_input_2,
buffered_output,template_1,template_2,parameters.operation)) {
// Record current read length
current_read_length = gt_template_get_total_length(template_1);
// Apply operation
gt_template *ptemplate;
switch (parameters.operation) {
case GT_MAP_SET_UNION:
ptemplate=gt_template_union_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
break;
case GT_MAP_SET_INTERSECTION:
ptemplate=gt_template_intersect_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
break;
case GT_MAP_SET_DIFFERENCE:
ptemplate=gt_template_subtract_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
break;
default:
gt_fatal_error(SELECTION_NOT_VALID);
break;
}
// Print template
gt_output_map_bofprint_template(buffered_output,ptemplate,output_attributes);
// Delete template
gt_template_delete(ptemplate);
}
// Clean
gt_template_delete(template_1);
gt_template_delete(template_2);
gt_buffered_input_file_close(buffered_input_1);
gt_buffered_input_file_close(buffered_input_2);
gt_buffered_output_file_close(buffered_output);
gt_input_file_close(input_file_1);
gt_input_file_close(input_file_2);
gt_output_file_close(output_file);
}
void gt_mapset_perform_cmp_operations() {
// File IN/OUT
gt_input_file* input_file_1 = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
gt_input_file* input_file_2 = (parameters.name_input_file_2==NULL) ?
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file_2,parameters.mmap_input);
if (parameters.name_input_file_2==NULL) GT_SWAP(input_file_1,input_file_2);
gt_output_file* output_file = (parameters.name_output_file==NULL) ?
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
// Buffered I/O
gt_buffered_input_file* buffered_input_1 = gt_buffered_input_file_new(input_file_1);
gt_buffered_input_file* buffered_input_2 = gt_buffered_input_file_new(input_file_2);
gt_buffered_output_file* buffered_output = gt_buffered_output_file_new(output_file);
gt_buffered_input_file_attach_buffered_output(buffered_input_1,buffered_output);
// Template I/O (synch)
gt_template *template_1 = gt_template_new();
gt_template *template_2 = gt_template_new();
gt_output_map_attributes* output_map_attributes = gt_output_map_attributes_new();
while (gt_mapset_read_template_get_commom_map(buffered_input_1,buffered_input_2,template_1,template_2)) {
// Record current read length
current_read_length = gt_template_get_total_length(template_1);
// Apply operation
switch (parameters.operation) {
case GT_MAP_SET_JOIN:
// Print Master's TAG+Counters+Maps
gt_output_map_bofprint_tag(buffered_output,template_1->tag,template_1->attributes,output_map_attributes);
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_1),
template_1->attributes,output_map_attributes); // Master's Counters
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_2),
template_1->attributes,output_map_attributes); // Slave's Counters
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_template_maps(buffered_output,template_1,output_map_attributes); // Master's Maps
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_template_maps(buffered_output,template_2,output_map_attributes); // Slave's Maps
gt_bofprintf(buffered_output,"\n");
break;
case GT_MAP_SET_COMPARE: {
// Perform simple cmp operations
gt_template *template_master_minus_slave=gt_template_subtract_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
gt_template *template_slave_minus_master=gt_template_subtract_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_2,template_1);
gt_template *template_intersection=gt_template_intersect_template_mmaps_fx(gt_mapset_mmap_cmp,gt_mapset_map_cmp,template_1,template_2);
/*
* Print results :: (TAG (Master-Slave){COUNTER MAPS} (Slave-Master){COUNTER MAPS} (Intersection){COUNTER MAPS})
*/
gt_output_map_bofprint_tag(buffered_output,template_1->tag,template_1->attributes,output_map_attributes);
// Counters
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_master_minus_slave),
template_master_minus_slave->attributes,output_map_attributes); // (Master-Slave){COUNTER}
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_slave_minus_master),
template_slave_minus_master->attributes,output_map_attributes); // (Slave-Master){COUNTER}
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_counters(buffered_output,gt_template_get_counters_vector(template_intersection),
template_intersection->attributes,output_map_attributes); // (Intersection){COUNTER}
// Maps
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_template_maps(buffered_output,template_master_minus_slave,output_map_attributes); // (Master-Slave){COUNTER}
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_template_maps(buffered_output,template_slave_minus_master,output_map_attributes); // (Slave-Master){COUNTER}
gt_bofprintf(buffered_output,"\t");
gt_output_map_bofprint_template_maps(buffered_output,template_intersection,output_map_attributes); // (Intersection){COUNTER}
gt_bofprintf(buffered_output,"\n");
// Delete templates
gt_template_delete(template_master_minus_slave);
gt_template_delete(template_slave_minus_master);
gt_template_delete(template_intersection);
}
break;
default:
gt_fatal_error(SELECTION_NOT_VALID);
break;
}
}
// Clean
gt_template_delete(template_1);
gt_template_delete(template_2);
gt_buffered_input_file_close(buffered_input_1);
gt_buffered_input_file_close(buffered_input_2);
gt_buffered_output_file_close(buffered_output);
gt_input_file_close(input_file_1);
gt_input_file_close(input_file_2);
gt_output_file_close(output_file);
}
void gt_mapset_perform_merge_map() {
// Open file IN/OUT
gt_input_file* input_file_1 = gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
gt_input_file* input_file_2 = (parameters.name_input_file_2==NULL) ?
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file_2,parameters.mmap_input);
if (parameters.name_input_file_2==NULL) GT_SWAP(input_file_1,input_file_2);
gt_output_file* output_file = (parameters.name_output_file==NULL) ?
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
// Mutex
pthread_mutex_t input_mutex = PTHREAD_MUTEX_INITIALIZER;
// Parallel reading+process
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
{
if (parameters.files_contain_same_reads) {
gt_merge_synch_map_files(&input_mutex,parameters.paired_end,output_file,input_file_1,input_file_2);
} else {
gt_merge_unsynch_map_files(&input_mutex,input_file_1,input_file_2,parameters.paired_end,output_file);
}
}
// Clean
gt_input_file_close(input_file_1);
gt_input_file_close(input_file_2);
gt_output_file_close(output_file);
}
void gt_mapset_display_compact_map() {
// Open file IN/OUT
gt_input_file* input_file = (parameters.name_input_file_1==NULL) ?
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file_1,parameters.mmap_input);
gt_output_file* output_file = (parameters.name_output_file==NULL) ?
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
{
gt_output_map_attributes* const output_map_attributes = gt_output_map_attributes_new();
output_map_attributes->compact = true;
GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
// Print compact summary
gt_bofprintf(buffered_output,"End1::"PRIgts"[%"PRIu64"]\t",PRIgts_content(alignment->tag),gt_string_get_length(alignment->read));
gt_output_map_bofprint_counters(buffered_output,alignment->counters,alignment->attributes,output_map_attributes);
gt_bofprintf(buffered_output,"\t");
uint64_t printed = 0;
GT_ALIGNMENT_ITERATE(alignment,map) {
if (printed>0) {
gt_bofprintf(buffered_output,","PRIgts,PRIgts_content(map->seq_name));
} else {
gt_bofprintf(buffered_output,PRIgts,PRIgts_content(map->seq_name));
}
++printed;
}
gt_bofprintf(buffered_output,"\n");
}
} GT_END_READING_WRITING_LOOP(input_file,output_file,template);
// Clean
gt_output_map_attributes_delete(output_map_attributes);
}
// Clean
gt_input_file_close(input_file);
gt_output_file_close(output_file);
}
#define GT_MAPSET_OPERATIONS "union,intersection,difference,compare,join,merge-map,display-compact"
void gt_filter_parse_operation(char* const string_operation) {
if (gt_streq(string_operation,"INTERSECTION") || gt_streq(string_operation,"Intersection") || gt_streq(string_operation,"intersection")) {
parameters.operation = GT_MAP_SET_INTERSECTION;
} else if (gt_streq(string_operation,"UNION") || gt_streq(string_operation,"Union") || gt_streq(string_operation,"union")) {
parameters.operation = GT_MAP_SET_UNION;
} else if (gt_streq(string_operation,"DIFFERENCE") || gt_streq(string_operation,"Difference") || gt_streq(string_operation,"difference")) {
parameters.operation = GT_MAP_SET_DIFFERENCE;
} else if (gt_streq(string_operation,"COMPARE") || gt_streq(string_operation,"Compare") || gt_streq(string_operation,"compare")) {
parameters.operation = GT_MAP_SET_COMPARE;
} else if (gt_streq(string_operation,"JOIN") || gt_streq(string_operation,"Join") || gt_streq(string_operation,"join")) {
parameters.operation = GT_MAP_SET_JOIN;
} else if (gt_streq(string_operation,"MERGE-MAP") || gt_streq(string_operation,"Merge-map") || gt_streq(string_operation,"merge-map")) {
parameters.operation = GT_MERGE_MAP;
} else if (gt_streq(string_operation,"DISPLAY-COMPACT") || gt_streq(string_operation,"Display-compact") || gt_streq(string_operation,"display-compact")) {
parameters.operation = GT_DISPLAY_COMPACT_MAP;
} else {
if (string_operation[0]=='I' || string_operation[0]=='i') {
fprintf(stderr,"\tAssuming 'Intersection' ...\n");
parameters.operation = GT_MAP_SET_INTERSECTION;
} else if (string_operation[0]=='U' || string_operation[0]=='u') {
fprintf(stderr,"\tAssuming 'Union' ...\n");
parameters.operation = GT_MAP_SET_UNION;
} else if (string_operation[0]=='D' || string_operation[0]=='d') {
fprintf(stderr,"\tAssuming 'Difference' ...\n");
parameters.operation = GT_MAP_SET_DIFFERENCE;
} else if (string_operation[0]=='C' || string_operation[0]=='c') {
fprintf(stderr,"\tAssuming 'Compare' ...\n");
parameters.operation = GT_MAP_SET_COMPARE;
} else if (string_operation[0]=='P' || string_operation[0]=='p') {
fprintf(stderr,"\tAssuming 'Join' ...\n");
parameters.operation = GT_MAP_SET_JOIN;
} else if (string_operation[0]=='M' || string_operation[0]=='m') {
fprintf(stderr,"\tAssuming 'Merge-map' ...\n");
parameters.operation = GT_MERGE_MAP;
} else {
gt_fatal_error_msg("Unknown operation '%s' in {"GT_MAPSET_OPERATIONS"}",string_operation);
}
}
}
void parse_arguments(int argc,char** argv) {
struct option* gt_mapset_getopt = gt_options_adaptor_getopt(gt_mapset_options);
gt_string* const gt_mapset_short_getopt = gt_options_adaptor_getopt_short(gt_mapset_options);
int option, option_index;
while (true) {
// Get option & Select case
if ((option=getopt_long(argc,argv,
gt_string_get_string(gt_mapset_short_getopt),gt_mapset_getopt,&option_index))==-1) break;
// c=getopt_long(argc,argv,"i:o:psht:v",long_options,&option_index);
switch (option) {
/* Operations */
case 'C':
gt_filter_parse_operation(optarg);
break;
/* I/O */
case 300:
parameters.name_input_file_1 = optarg;
break;
case 301:
parameters.name_input_file_2 = optarg;
break;
case 'p':
parameters.paired_end = true;
break;
case 302:
parameters.mmap_input = true;
gt_fatal_error(NOT_IMPLEMENTED);
break;
case 'o':
parameters.name_output_file = optarg;
break;
/* Compare Function */
case 's': // files-with-same-reads
parameters.files_contain_same_reads = true;
break;
case 400: // eq-th
parameters.eq_threshold = atof(optarg);
break;
case 401: // strict
parameters.strict = true;
break;
/* Misc */
case 'v':
parameters.verbose = true;
break;
case 't':
#ifdef HAVE_OPENMP
parameters.num_threads = atol(optarg);
#endif
break;
case 'h':
fprintf(stderr, "USE: ./gt.mapset [OPERATION] [ARGS]...\n");
gt_options_fprint_menu(stderr,gt_mapset_options,gt_mapset_groups,false,false);
exit(1);
case 'J':
gt_options_fprint_json_menu(stderr,gt_mapset_options,gt_mapset_groups,true,false);
exit(1);
break;
case '?':
default:
gt_fatal_error_msg("Option not recognized");
}
}
// Check parameters
if (parameters.operation==GT_MAP_SET_UNKNOWN) {
gt_fatal_error_msg("Please specify operation {"GT_MAPSET_OPERATIONS"}");
}
if (parameters.operation!=GT_DISPLAY_COMPACT_MAP && !parameters.name_input_file_1) {
gt_fatal_error_msg("Input file 1 required (--i1)\n");
}
// Free
gt_string_delete(gt_mapset_short_getopt);
}
int main(int argc,char** argv) {
// GT error handler
gt_handle_error_signals();
// Parsing command-line options
parse_arguments(argc,argv);
// Do it !
if (parameters.operation==GT_MERGE_MAP) {
gt_mapset_perform_merge_map();
} else if (parameters.operation==GT_DISPLAY_COMPACT_MAP) {
gt_mapset_display_compact_map();
} else if (parameters.operation==GT_MAP_SET_INTERSECTION ||
parameters.operation==GT_MAP_SET_UNION ||
parameters.operation==GT_MAP_SET_DIFFERENCE) {
gt_mapset_perform_set_operations();
} else {
gt_mapset_perform_cmp_operations();
}
return 0;
}
|
GB_binop__band_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_int32
// A.*B function (eWiseMult): GB_AemultB__band_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__band_int32
// C+=b function (dense accum): GB_Cdense_accumb__band_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_int32
// C=scalar+B GB_bind1st__band_int32
// C=scalar+B' GB_bind1st_tran__band_int32
// C=A+scalar GB_bind2nd__band_int32
// C=A'+scalar GB_bind2nd_tran__band_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
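// For illustration (not part of the generated kernel itself): with this
// definition, GB_BINOP (z, 3, 5, i, j) expands to z = (3) & (5) ; so z == 1.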
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT32 || GxB_NO_BAND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__band_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__band_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__band_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__band_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__band_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__band_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__band_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
GrB_Info GB_bind1st_tran__band_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
GrB_Info GB_bind2nd_tran__band_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hipc.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
inline void scanint(int *x)
{
register char c = getchar_unlocked();
*x = 0;
for(; (c<48)||(c>57);c = getchar_unlocked());
for(; (c>47)&&(c<58);c = getchar_unlocked())
*x = (int)((((*x)<<1) + ((*x)<<3)) + c - 48);
}
inline void printint(int n)
{
char buf[11];
buf[10] = '\n';
int i = 9;
while(n)
{
buf[i--] = n % 10 + '0';
n /= 10;
}
while(buf[i] != '\n')
putchar_unlocked(buf[++i]);
}
struct timeval start_total_time, end_total_time; /*Algo times*/
struct timeval start_read_time, end_read_time; /* io need 2 timers */
struct timeval start_write_time, end_write_time;
struct timeval start_endtoend_time, end_endtoend_time; /* end to end time*/
struct Point
{
int x;
int y;
int z;
};
struct Edge
{
int first;
int second;
};
struct hashNode
{
struct Point *key;
int index;
struct hashNode *next;
};
struct hashMap
{
int size1;
struct hashNode **root;
} hashMap1;
int equalsPoint(struct Point *a,struct Point *b)
{
return (a->x==b->x && a->y==b->y && a->z==b->z);
}
void init_hashMap(int n)
{
hashMap1.size1 = n;
hashMap1.root = (struct hashNode **)malloc(n*sizeof(struct hashNode *));
int i;
for(i=0;i<n;i++)
{
hashMap1.root[i] = NULL;
}
}
void insertInLinkedList(int listIndex,struct Point *point,int index)
{
struct hashNode *soonToBeRoot = (struct hashNode *)malloc(sizeof(struct hashNode ));
soonToBeRoot->next = hashMap1.root[listIndex];
hashMap1.root[listIndex] = soonToBeRoot;
soonToBeRoot->key = point;
soonToBeRoot->index = index;
}
int searchInLinkedList(int listIndex,struct Point *point)
{
struct hashNode *root = hashMap1.root[listIndex];
while(root)
{
if(equalsPoint(root->key,point))
return root->index;
root = root -> next ;
}
return -1;
}
int hashFunc(struct Point *p); /* forward declaration; defined below, used by insert_hashMap() */
void insert_hashMap(struct Point *point,int index)
{
int hashCode = hashFunc(point);
insertInLinkedList(hashCode%(hashMap1.size1),point,index);
}
int hashFunc(struct Point *p)
{
int val1 = (p->x*997*661 + p->y*661 + p->z);
return val1&0x7FFFFFFF;
}
int getIndex(struct Point *point)
{
int hashCode = hashFunc(point);
return searchInLinkedList(hashCode%(hashMap1.size1),point);
}
int *parent;
int findRoot(int x)
{
if(parent[x]==x)
return x;
return ( parent[x] = findRoot(parent[x]) );
}
void union1(int x,int y)
{
int xRoot = findRoot(x);
int yRoot = findRoot(y);
if(xRoot>yRoot)
{
parent[xRoot] = yRoot;
}
else if(xRoot<yRoot)
{
parent[yRoot] = xRoot;
}
}
void beautifyOutputAndPrint(int N,struct Point *points)
{
int i;
int *prefixSum1 = (int *)malloc(N*sizeof(int));
int count1 = 0;
for(i=0;i<N;i++)
{
if(parent[i]==i)
prefixSum1[i] = ++count1;
}
for(i=0;i<N;i++)
{
printint(prefixSum1[findRoot(i)]);
}
}
int main(int argc,char* argv[])
{
gettimeofday(&start_endtoend_time,NULL);
if(argc < 2)
{
printf("Input Format: <NumberOfLines>\n");
exit(1);
}
int n,i;
n = atoi(argv[1]);
struct Point *points = (struct Point *)malloc(n*sizeof(struct Point));
struct Edge *edges = (struct Edge *)malloc(6*n*sizeof(struct Edge));
parent = (int *)malloc(n*sizeof(int));
init_hashMap(n);
int m = -1;
gettimeofday(&start_read_time,NULL);
for(i=0;i<n;i++)
{
scanint(&points[i].x);
scanint(&points[i].y);
scanint(&points[i].z);
/*scanf("%d,%d,%d\n",&points[i].x,&points[i].y,&points[i].z);*/
}
gettimeofday(&end_read_time,NULL);
omp_set_num_threads(omp_get_max_threads());
gettimeofday(&start_total_time,NULL);
for(i=0;i<n;i++)
{
insert_hashMap(&points[i],i);
parent[i] = i;
}
#pragma omp parallel
{
const int ithread = omp_get_thread_num();
const int nthreads = omp_get_num_threads();
int start1 = ithread*n/nthreads;
int end1 = (ithread+1)*n/nthreads;
int myStart = start1*6;
int i;
int index1;
for(i = start1;i<end1;i++)
{
struct Point temp = points[i];
/* changing x co-ordinate*/
temp.x++;
index1 = getIndex(&temp);
if(index1 > i )
{
edges[myStart].first = i;
edges[myStart].second = index1;
myStart++;
}
temp.x--;
temp.x--;
index1 = getIndex(&temp);
if(index1 > i )
{
edges[myStart].first = i;
edges[myStart].second = index1;
myStart++;
}
temp.x++;
/* changing y co-ordinate*/
temp.y++;
index1 = getIndex(&temp);
if(index1 > i )
{
edges[myStart].first = i;
edges[myStart].second = index1;
myStart++;
}
temp.y--;
temp.y--;
index1 = getIndex(&temp);
if(index1 > i )
{
edges[myStart].first = i;
edges[myStart].second = index1;
myStart++;
}
temp.y++;
/* changing z co-ordinate*/
temp.z++;
index1 = getIndex(&temp);
if(index1 > i )
{
edges[myStart].first = i;
edges[myStart].second = index1;
myStart++;
}
temp.z--;
temp.z--;
index1 = getIndex(&temp);
if(index1 > i )
{
edges[myStart].first = i;
edges[myStart].second = index1;
myStart++;
}
}
int start2 = start1*6;
for(i=start2;i<myStart;i++)
{
#pragma omp critical
union1(edges[i].first,edges[i].second);
}
}
gettimeofday(&end_total_time,NULL);
gettimeofday(&start_write_time,NULL);
beautifyOutputAndPrint(n,points);
gettimeofday(&end_write_time,NULL);
gettimeofday(&end_endtoend_time,NULL);
double endtoendTime = ( end_endtoend_time.tv_usec - start_endtoend_time.tv_usec );
endtoendTime += (end_endtoend_time.tv_sec - start_endtoend_time.tv_sec)*1000000;
endtoendTime /= 1000000;
double ioTime = (end_read_time.tv_usec - start_read_time.tv_usec) + (end_read_time.tv_sec - start_read_time.tv_sec)*1000000;
ioTime += (end_write_time.tv_usec - start_write_time.tv_usec) + (end_write_time.tv_sec - start_write_time.tv_sec)*1000000;;
double algoTime = (end_total_time.tv_usec - start_total_time.tv_usec) + (end_total_time.tv_sec - start_total_time.tv_sec)*1000000;
fprintf(stderr, "--- other user output (if any) ---\n");
fprintf(stderr, "Algorithm time = %f microsecs\n",algoTime);
fprintf(stderr, "IO time = %f microsecs\n",ioTime);
fprintf(stderr, "End to end time = %f secs\n",endtoendTime);
return 0;
}
|
52be1f6a69174a0f7f7a7579802eab93d6429221.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
} ;
int padfunc(struct dataobj *restrict epsilon_vec, const int x_M, const int y_M, const int z_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int z_m)
{
float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data;
#pragma omp target enter data map(to: epsilon[0:epsilon_vec->size[0]][0:epsilon_vec->size[1]][0:epsilon_vec->size[2]])
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
{
#pragma omp target teams distribute parallel for collapse(2)
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
epsilon[abc_x_l + 2][y + 2][z + 2] = epsilon[12][y + 2][z + 2];
}
}
}
for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
{
#pragma omp target teams distribute parallel for collapse(2)
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
epsilon[abc_x_r + 2][y + 2][z + 2] = epsilon[x_M - 8][y + 2][z + 2];
}
}
}
#pragma omp target teams distribute parallel for collapse(1)
for (int x = x_m; x <= x_M; x += 1)
{
for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
epsilon[x + 2][abc_y_l + 2][z + 2] = epsilon[x + 2][12][z + 2];
}
}
for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
epsilon[x + 2][abc_y_r + 2][z + 2] = epsilon[x + 2][y_M - 8][z + 2];
}
}
for (int y = y_m; y <= y_M; y += 1)
{
for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
{
epsilon[x + 2][y + 2][abc_z_l + 2] = epsilon[x + 2][y + 2][12];
}
for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
{
epsilon[x + 2][y + 2][abc_z_r + 2] = epsilon[x + 2][y + 2][z_M - 8];
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
#pragma omp target update from(epsilon[0:epsilon_vec->size[0]][0:epsilon_vec->size[1]][0:epsilon_vec->size[2]])
#pragma omp target exit data map(release: epsilon[0:epsilon_vec->size[0]][0:epsilon_vec->size[1]][0:epsilon_vec->size[2]])
return 0;
}
|
RPN.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <ctype.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/*
* RPN reversed polish notation
*
* 4/2009 Public Domain by Wesley Ebisuzaki
*
* operations:
*
* + - * /
* == != < <= > >=
*
* sqrt, sq, abs, 1/x, floor, ceil, pow (x^y), exp, ln
* min, max, merge, mask
* sin, cos, tan, asin, acos, atan, atan2
*
* pi = 3.14159...
* days_in_ref_month = number of days in the reference month
* days_in_verf_month = number of days in the verification month
*
* registers: sto_N, rcl_N, clr_N
* rcl_lat, rcl_lon
* rcl (data)
* stack: exc (swap), pop, dup, clr
*
* yrev - swap grids, north <-> south
* alt_x_scan - for Glahn packing
* xave, xdev
*
* print_(X): X=max, min, rms, corr, ave, diff
*
* at the end of rpn, the top of the stack is saved to data unless clr done first
*/
// #define N_RPN_REGS 10 moved to wgrib2.h
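// Illustrative use (an assumed wgrib2 command line, not taken from this file):
//   wgrib2 in.grb -rpn "273.15:-" -grib_out out.grb
// pushes the decoded field, pushes the scalar 273.15, and subtracts it
// (Kelvin -> Celsius); the colon-separated tokens are applied left to right.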
#define STACK_SIZE 10
extern int decode, latlon;
extern double *lat, *lon;
extern int match_flag;
extern const char *item_deliminator;
extern int use_scale;
/* note: rpn_n[N_RPN_REGS], and rpn_data[N_RPN_REG] */
size_t rpn_n[N_RPN_REGS] = { 0 };
float *rpn_data[N_RPN_REGS] = { NULL };
static float *stack[STACK_SIZE];
#define SCALAR 0
#define VECTOR 1
#define DBL_VEC 2
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
int push(int top, unsigned int ndata, int type, float f, float *ff, double *d);
static void gbl_wt(double *val, double *wt, float *data, int i, int j, int nx, int ny, double wt0);
static void reg_wt(double *val, double *wt, float *data, int i, int j, int nx, int ny, double wt0);
/*
* HEADER:100:rpn:misc:1:reverse polish notation calculator
*/
int f_rpn(ARG1) {
char string[100];
const char *p;
int j, n;
unsigned int i, k, m;
float f;
float tmp;
int top, flag;
double cos_lat, last_lat;
double sum1, sum2, wt, sq1, sq2, sq12;
int nx, ny, res, scan;
unsigned int npnts;
float *p1, *p2;
int year, month, day, hour, minute, second;
static int state=0;
if (mode == -1) {
decode = latlon = 1;
if (state == 0) {
/* check compile-time configuration */
if (sizeof(rpn_n)/sizeof(size_t) != N_RPN_REGS)
fatal_error("RPN: configure N_RPN_REGS and rpn_n[]","");
if (sizeof(rpn_data)/sizeof(float *) != N_RPN_REGS)
fatal_error("RPN: configure N_RPN_REGS and rpn_data[]","");
state = 1;
}
return 0;
}
if (mode == -2) {
/* 5/2015 no cleanup for callable wgrib2, preserve registers
if (state == 1) {
for (i = 0; i < N_RPN_REGS; i++) {
if (rpn_data[i]) {
free (rpn_data[i]);
rpn_data[i] = NULL;
rpn_n[i] = 0;
}
}
}
state = 0;
*/
return 0;
}
// initialize stack
if (data == NULL) fatal_error("rpn: decode failed","");
use_scale = 0;
for (i = 0; i < STACK_SIZE; i++) stack[i] = NULL;
top = push(-1, ndata, VECTOR, 0.0, data, NULL);
if (mode == 98) fprintf(stderr,"RPN: arg=%s\n",arg1);
// scan parameters
p = arg1;
while (sscanf(p,"%[^:]%n", string, &n) == 1) {
if (mode == 98) fprintf(stderr, "RPN: top=%d (%s)", top, string);
p = p + n;
if (*p == ':') p++;
// binary operators + - * /
if (strcmp(string,"+") == 0) {
if (mode == 98) fprintf(stderr," plus");
if (top <= 0) fatal_error("rpn: bad + expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) { stack[j][i] = stack[j][i] + stack[top][i];
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"-") == 0) {
if (mode == 98) fprintf(stderr," minus");
if (top <= 0) fatal_error("rpn: bad - expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = stack[j][i] - stack[top][i];
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"*") == 0) {
if (mode == 98) fprintf(stderr," times");
if (top <= 0) fatal_error("rpn: bad * expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = stack[j][i] * stack[top][i];
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"/") == 0) {
if (mode == 98) fprintf(stderr," div");
if (top <= 0) fatal_error("rpn: bad / expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i]) && (stack[top][i] != 0.0)) {
stack[j][i] = stack[j][i] / stack[top][i];
}
else stack[j][i] = UNDEFINED;
}
top--;
}
// merge: stack(top-1) = stack(top) (if defined) ; top--;
else if (strcmp(string,"merge") == 0) {
if (mode == 98) fprintf(stderr," merge");
if (top <= 0) fatal_error("rpn: bad merge expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[j][i] = stack[top][i];
}
}
top--;
}
// exc (swap top and top-1 stack entries)
else if (strcmp(string,"exc") == 0 || strcmp(string,"swap") == 0) {
if (mode == 98) fprintf(stderr," exchange");
if (top <= 0) fatal_error("rpn: bad exc/swap expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
f = stack[j][i];
stack[j][i] = stack[top][i];
stack[top][i] = f;
}
}
// pop: top--;
else if (strcmp(string,"pop") == 0) {
if (mode == 98) fprintf(stderr," pop");
if (top < 0) fatal_error("rpn: bad pop","");
top--;
}
// dup: top++; stack(top) = stack(top-1)
else if (strcmp(string,"dup") == 0) {
if (mode == 98) fprintf(stderr," dup");
top = push(top,ndata,VECTOR,0.0,stack[top],NULL);
}
// sqrt: stack(top) = sqrt(stack(top))
else if (strcmp(string,"sqrt") == 0) {
if (mode == 98) fprintf(stderr," sqrt");
if (top < 0) fatal_error("rpn: bad sqrt expression","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && stack[top][i] >= 0.0) {
stack[top][i] = sqrtf(stack[top][i]);
}
else stack[top][i] = UNDEFINED;
}
}
// sq: x*x
else if (strcmp(string,"sq") == 0) {
if (mode == 98) fprintf(stderr," sq");
if (top < 0) fatal_error("rpn: bad sq expression","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] *= stack[top][i];
}
}
}
// pow: x^y
else if (strcmp(string,"pow") == 0) {
if (top <= 0) fatal_error("rpn: bad pow expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = powf(stack[j][i], stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
// ln - natural log
else if (strcmp(string,"ln") == 0) {
if (top < 0) fatal_error("rpn: bad log expression","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && stack[top][i] > 0.0) {
stack[top][i] = logf(stack[top][i]);
}
else stack[top][i] = UNDEFINED;
}
}
// exp
else if (strcmp(string,"exp") == 0) {
if (top < 0) fatal_error("rpn: bad exp expression","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = expf(stack[top][i]);
}
}
}
// abs
else if (strcmp(string,"abs") == 0) {
if (top < 0) fatal_error("rpn: bad abs expression","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
if (stack[top][i] < 0.0) stack[top][i] = -stack[top][i];
}
}
}
// 1/x
else if (strcmp(string,"1/x") == 0) {
if (mode == 98) fprintf(stderr," 1/x");
if (top < 0) fatal_error("rpn: bad 1/x","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && stack[top][i] != 0.0) {
stack[top][i] = 1.0 / stack[top][i];
}
else stack[top][i] = UNDEFINED;
}
}
// floor
else if (strcmp(string,"floor") == 0) {
if (top < 0) fatal_error("rpn: bad floor","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = floorf(stack[top][i]);
}
}
}
// ceil
else if (strcmp(string,"ceil") == 0) {
if (top < 0) fatal_error("rpn: bad ceil","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = ceilf(stack[top][i]);
}
}
}
// sin cos tan asin acos atan
else if (strcmp(string,"sin") == 0) {
if (top < 0) fatal_error("rpn: bad sin","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = sinf(stack[top][i]);
}
}
}
else if (strcmp(string,"cos") == 0) {
if (top < 0) fatal_error("rpn: bad cos","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = cosf(stack[top][i]);
}
}
}
else if (strcmp(string,"tan") == 0) {
if (top < 0) fatal_error("rpn: bad tan","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = tanf(stack[top][i]);
}
}
}
else if (strcmp(string,"asin") == 0) {
if (top < 0) fatal_error("rpn: bad asin","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
if (fabsf(stack[top][i]) > 1.0) stack[top][i] = UNDEFINED;
else stack[top][i] = asinf(stack[top][i]);
}
}
}
else if (strcmp(string,"acos") == 0) {
if (top < 0) fatal_error("rpn: bad acos","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
if (fabsf(stack[top][i]) > 1.0) stack[top][i] = UNDEFINED;
else stack[top][i] = acosf(stack[top][i]);
}
}
}
else if (strcmp(string,"atan") == 0) {
if (top < 0) fatal_error("rpn: bad atan","");
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
stack[top][i] = atanf(stack[top][i]);
}
}
}
else if (strcmp(string,"atan2") == 0) {
if (top <= 0) fatal_error("rpn: bad atan2 expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = atan2f(stack[j][i], stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
// sto_N
else if (string[0] == 's' && string[1] == 't' && string[2] == 'o' && string[3] == '_'
&& isdigit((unsigned char) string[4]) && (string[5] == 0 || (isdigit((unsigned char) string[5]) && string[6] == 0) )) {
if (top < 0) fatal_error("rpn: sto","");
j = atoi(string+4);
if (j >= N_RPN_REGS || j < 0) fatal_error("rpn: bad register number in %s", string);
if (ndata != rpn_n[j]) {
if (rpn_data[j]) free(rpn_data[j]);
rpn_n[j] = ndata;
rpn_data[j] = (float *) malloc(sizeof(float) * (size_t) ndata);
if (rpn_data[j] == NULL) fatal_error("rpn: memory allocation failed in %s",string);
}
for (i=0; i < ndata; i++) {
rpn_data[j][i] = stack[top][i];
}
}
// rcl_N
else if (string[0] == 'r' && string[1] == 'c' && string[2] == 'l' && string[3] == '_'
&& isdigit((unsigned char) string[4]) && (string[5] == 0 || (isdigit((unsigned char) string[5]) && string[6] == 0) )) {
j = atoi(string+4);
if (j >= N_RPN_REGS || j < 0) fatal_error("rpn: bad register number in %s", string);
if (rpn_n[j] != 0 && rpn_n[j] != ndata) fatal_error("rpn: rcl size mismatch","");
if (rpn_n[j] == 0) { // unused registers read as zero
top = push(top,ndata,SCALAR,0.0,rpn_data[j],NULL);
}
else {
top = push(top,ndata,VECTOR,0.0,rpn_data[j],NULL);
}
}
// clr_N
else if (string[0] == 'c' && string[1] == 'l' && string[2] == 'r' && string[3] == '_'
&& isdigit((unsigned char) string[4]) && (string[5] == 0 || (isdigit((unsigned char) string[5]) && string[6] == 0) )) {
j = atoi(string+4);
if (j >= N_RPN_REGS || j < 0) fatal_error("rpn: bad register number in %s", string);
if (rpn_data[j]) {
free(rpn_data[j]);
rpn_data[j] = NULL;
}
rpn_n[j] = 0;
}
// rcl_lat
else if (strcmp(string,"rcl_lat") == 0) {
if (lat == NULL) fatal_error("rpn: rcl_lat: lat not defined","");
top = push(top,ndata,DBL_VEC,0.0,NULL,lat);
}
// rcl_lon
else if (strcmp(string,"rcl_lon") == 0) {
if (lon == NULL) fatal_error("rpn: rcl_lon: lon not defined","");
top = push(top,ndata,DBL_VEC,0.0,NULL,lon);
}
// max and min
else if (strcmp(string,"max") == 0) {
if (top <= 0) fatal_error("rpn: bad max expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (stack[j][i] < stack[top][i]) stack[j][i] = stack[top][i];
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"min") == 0) {
if (top <= 0) fatal_error("rpn: bad min expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (stack[j][i] > stack[top][i]) stack[j][i] = stack[top][i];
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,">") == 0) {
if (top <= 0) fatal_error("rpn: bad > expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = (stack[j][i] > stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,">=") == 0) {
if (top <= 0) fatal_error("rpn: bad >= expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = (stack[j][i] >= stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"!=") == 0) {
if (top <= 0) fatal_error("rpn: bad != expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = (stack[j][i] != stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"==") == 0) {
if (top <= 0) fatal_error("rpn: bad == expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = (stack[j][i] == stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"<") == 0) {
if (top <= 0) fatal_error("rpn: bad < expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = (stack[j][i] < stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"<=") == 0) {
if (top <= 0) fatal_error("rpn: bad <= expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
stack[j][i] = (stack[j][i] <= stack[top][i]);
}
else stack[j][i] = UNDEFINED;
}
top--;
}
else if (strcmp(string,"mask") == 0) {
if (top <= 0) fatal_error("rpn: bad mask expression","");
j = top-1;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (stack[top][i] == 0.0) stack[j][i] = UNDEFINED;
}
else stack[j][i] = UNDEFINED;
}
top--;
}
// yrev - like in GrADS : N <-> S
else if (strcmp(string,"yrev") == 0) {
if (top < 0) fatal_error("rpn: yrev needs field","");
get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
if (nx <= 0 || ny <= 0) fatal_error("rpn: yrev only on nx x ny grids","");
if ((scan >> 4) != 0 && (scan >> 4) != 4)
fatal_error("rpn: yrev only appropriate for we:ns and we:sn grids","");
for (k = 0; k < ny/2; k++) {
p1 = stack[top] + nx*k;
p2 = stack[top] + nx*(ny-k-1);
for (m = 0; m < nx; m++) {
tmp = p1[m];
p1[m] = p2[m];
p2[m] = tmp;
}
}
}
// smth9 - like in GrADS smth9g - global field
else if (strcmp(string,"smth9g") == 0) {
if (mode == 98) fprintf(stderr," smth9");
if (top < 0) fatal_error("rpn: smth9 needs field","");
get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
if (nx <= 0 || ny <= 0) fatal_error("rpn: smth9g only works on nx x ny grids","");
if ((scan >> 4) != 0 && (scan >> 4) != 4)
fatal_error("rpn: smth9 only appropriate for we:ns and we:sn grids","");
top = push(top,ndata,VECTOR,0.0,stack[top],NULL);
for (m = 0; m < ny; m++) {
for (i = 0; i < nx; i++) {
wt = sum1 = 0.0;
gbl_wt(&sum1, &wt, stack[top], i-1, m-1, nx, ny,0.3);
gbl_wt(&sum1, &wt, stack[top], i , m-1, nx, ny,0.5);
gbl_wt(&sum1, &wt, stack[top], i+1, m-1, nx, ny,0.3);
gbl_wt(&sum1, &wt, stack[top], i-1, m , nx, ny,0.5);
gbl_wt(&sum1, &wt, stack[top], i , m , nx, ny,1.0);
gbl_wt(&sum1, &wt, stack[top], i+1, m , nx, ny,0.5);
gbl_wt(&sum1, &wt, stack[top], i-1, m+1, nx, ny,0.3);
gbl_wt(&sum1, &wt, stack[top], i , m+1, nx, ny,0.5);
gbl_wt(&sum1, &wt, stack[top], i+1, m+1, nx, ny,0.3);
stack[top-1][i + m*nx] = wt > 0.0 ? sum1/wt : UNDEFINED;
}
}
top--;
}
// smth9r - like in GrADS smth9g - regional field
else if (strcmp(string,"smth9r") == 0) {
if (mode == 98) fprintf(stderr," smth9");
if (top < 0) fatal_error("rpn: smth9 needs field","");
get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
if (nx <= 0 || ny <= 0) fatal_error("rpn: smth9r only works on nx x ny grids","");
if ((scan >> 4) != 0 && (scan >> 4) != 4)
fatal_error("rpn: smth9 only appropriate for we:ns and we:sn grids","");
top = push(top,ndata,VECTOR,0.0,stack[top],NULL);
for (m = 0; m < ny; m++) {
for (i = 0; i < nx; i++) {
wt = sum1 = 0.0;
reg_wt(&sum1, &wt, stack[top], i-1, m-1, nx, ny,0.3);
reg_wt(&sum1, &wt, stack[top], i , m-1, nx, ny,0.5);
reg_wt(&sum1, &wt, stack[top], i+1, m-1, nx, ny,0.3);
reg_wt(&sum1, &wt, stack[top], i-1, m , nx, ny,0.5);
reg_wt(&sum1, &wt, stack[top], i , m , nx, ny,1.0);
reg_wt(&sum1, &wt, stack[top], i+1, m , nx, ny,0.5);
reg_wt(&sum1, &wt, stack[top], i-1, m+1, nx, ny,0.3);
reg_wt(&sum1, &wt, stack[top], i , m+1, nx, ny,0.5);
reg_wt(&sum1, &wt, stack[top], i+1, m+1, nx, ny,0.3);
stack[top-1][i + m*nx] = wt > 0.0 ? sum1/wt : UNDEFINED;
}
}
top--;
}
else if (strcmp(string,"alt_x_scan") == 0) {
if (top < 0) fatal_error("rpn: alt_x_scan needs field","");
get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
if (nx <= 0 || ny <= 0) fatal_error("rpn: alt_x_scan only works on nx x ny grids","");
for (k = 1; k < ny; k += 2) {
p1 = stack[top] + nx*k;
p2 = p1 + nx - 1;
for (m = 0; m < nx/2; m++) {
tmp = *p1;
*p1++ = *p2;
*p2-- = tmp;
}
}
}
else if (strcmp(string,"xave") == 0) { // x average the field
if (top < 0) fatal_error("rpn: xave needs field","");
get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
if (nx <= 0 || ny <= 0) fatal_error("rpn: xave only works on nx x ny grids","");
for (k = 0; k < ndata; k += nx) {
sum1 = 0.0;
i = 0;
for (m = 0; m < nx; m++) {
if (DEFINED_VAL(stack[top][k+m])) {
sum1 += stack[top][k+m];
i++;
}
}
tmp = i ? sum1 / (double) i : 0.0;
for (m = 0; m < nx; m++) {
if (DEFINED_VAL(stack[top][k+m])) {
stack[top][k+m] = tmp;
}
}
}
}
else if (strcmp(string,"xdev") == 0) { // deviation from zonal mean
if (top < 0) fatal_error("rpn: xave needs field","");
get_nxny(sec, &nx, &ny, &npnts, &res, &scan);
if (nx <= 0 || ny <= 0) fatal_error("rpn: xave only works on nx x ny grids","");
for (k = 0; k < ndata; k += nx) {
sum1 = 0.0;
i = 0;
for (m = 0; m < nx; m++) {
if (DEFINED_VAL(stack[top][k+m])) {
sum1 += stack[top][k+m];
i++;
}
}
tmp = i ? sum1 / (double) i : 0.0;
for (m = 0; m < nx; m++) {
if (DEFINED_VAL(stack[top][k+m])) {
stack[top][k+m] -= tmp;
}
}
}
}
// change to rcl-data, rcl-lat, rcl-lon
// rcl: stack(++top) = data
else if (strcmp(string,"rcl") == 0) {
top = push(top,ndata,VECTOR,0.0,data,NULL);
}
// sto: data = stack(top)
else if (strcmp(string,"sto") == 0) {
if (top < 0) fatal_error("rpn: bad sto","");
for (i=0; i < ndata; i++) {
data[i] = stack[top][i];
}
}
// clr: empty stack
else if (strcmp(string,"clr") == 0) {
top = -1;
}
// pi: stack(++top) = pi
else if (strcmp(string,"pi") == 0) {
top = push(top,ndata,SCALAR,(float) M_PI,NULL,NULL);
}
// rand: stack(++top) = random number from 0..1
// note: rand() is not thread safe, do not OpenMP
// srand(seed) could be called first to set up seed
// since srand is not called, seed is 1
else if (strcmp(string,"rand") == 0) {
if (mode == 98) fprintf(stderr," rand");
top = push(top,ndata,SCALAR,(float) 0.0f,NULL,NULL);
for (i = 0; i < ndata; i++) {
stack[top][i] = (double) rand() / (double) RAND_MAX;
}
}
else if (strcmp(string,"days_in_ref_month") == 0) {
reftime(sec, &year, &month, &day, &hour, &minute, &second);
i = num_days_in_month(year, month);
top = push(top,ndata,SCALAR,(float) i,NULL,NULL);
}
else if (strcmp(string,"days_in_verf_month") == 0) {
verftime(sec, &year, &month, &day, &hour, &minute, &second);
i = num_days_in_month(year, month);
top = push(top,ndata,SCALAR,(float) i,NULL,NULL);
}
// print operations .. doesn't affect the stack
else if (strcmp(string,"print_max") == 0) {
if (top < 0) fatal_error("rpn: bad print_max expression","");
flag = 0;
tmp = 0.0;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
if (flag) tmp = (tmp < stack[top][i]) ? stack[top][i] : tmp;
else {
flag = 1;
tmp = stack[top][i];
}
}
}
sprintf(inv_out,"%srpn_max=%g",item_deliminator,tmp);
inv_out += strlen(inv_out);
}
else if (strcmp(string,"print_min") == 0) {
if (top < 0) fatal_error("rpn: bad print_min expression","");
flag = 0;
tmp = 0.0;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
if (flag) tmp = (tmp > stack[top][i]) ? stack[top][i] : tmp;
else {
flag = 1;
tmp = stack[top][i];
}
}
}
sprintf(inv_out,"%srpn_min=%g",item_deliminator,tmp);
inv_out += strlen(inv_out);
}
// print_diff: prints out cosine weighted difference (push - top)
else if (strcmp(string,"print_diff") == 0) {
if (top <= 0) fatal_error("rpn: print_rms needs two fields","");
j = top - 1;
last_lat = 0;
cos_lat = 1.0;
sum1 = wt = 0.0;
if (lat != NULL) {
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (last_lat != lat[i]) {
cos_lat = cos(lat[i]*M_PI/180.0);
last_lat = lat[i];
}
sum1 += (stack[j][i] - stack[top][i]) * cos_lat;
wt += cos_lat;
}
}
}
else {
#pragma omp parallel for private(i) reduction(+:wt,sum1)
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
sum1 += (stack[j][i] - stack[top][i]);
wt += 1.0;
}
}
}
if (wt != 0.0) sprintf(inv_out,"%srpn_diff=%g",item_deliminator,sum1/wt);
else sprintf(inv_out,"%srpn_diff=undefined",item_deliminator);
inv_out += strlen(inv_out);
}
// print_rms: prints out cosine weighted RMS
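// i.e. rms = sqrt( sum(w*(x-y)^2) / sum(w) ), where w = cos(lat) when
// latitudes are available and w = 1 otherwise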
else if (strcmp(string,"print_rms") == 0) {
if (top <= 0) fatal_error("rpn: print_rms needs two fields","");
j = top - 1;
last_lat = 0;
cos_lat = 1.0;
sum1 = wt = 0.0;
if (lat != NULL) {
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (last_lat != lat[i]) {
cos_lat = cos(lat[i]*M_PI/180.0);
last_lat = lat[i];
}
sum1 += (stack[top][i] - stack[j][i]) * (stack[top][i] - stack[j][i]) * cos_lat;
wt += cos_lat;
}
}
}
else {
#pragma omp parallel for private(i) reduction(+:wt,sum1)
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
sum1 += (stack[top][i] - stack[j][i]) * (stack[top][i] - stack[j][i]);
wt += 1.0;
}
}
}
if (wt != 0.0) sprintf(inv_out,"%srpn_rms=%g",item_deliminator,sqrt(sum1/wt));
else sprintf(inv_out,"%srpn_rms=undefined",item_deliminator);
inv_out += strlen(inv_out);
}
// print_ave: prints out cosine weighted ave
else if (strcmp(string,"print_ave") == 0) {
if (top < 0) fatal_error("rpn: bad print_ave expression","");
// if (lat == NULL) fatal_error("rpn: print_ave .. no latitudes defined","");
last_lat = 0;
cos_lat = 1.0;
sum1 = wt = 0.0;
if (lat != NULL) {
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
if (last_lat != lat[i]) {
cos_lat = cos(lat[i]*M_PI/180.0);
last_lat = lat[i];
}
sum1 += stack[top][i] * cos_lat;
wt += cos_lat;
}
}
}
else {
#pragma omp parallel for private(i) reduction(+:wt,sum1)
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i])) {
sum1 += stack[top][i];
wt += 1.0;
}
}
}
if (wt != 0.0) sprintf(inv_out,"%srpn_ave=%g",item_deliminator,sum1/wt);
else sprintf(inv_out,"%srpn_ave=undefined",item_deliminator);
inv_out += strlen(inv_out);
}
// print_wt_ave: prints weighted ave, X=data, Y=weights
else if (strcmp(string,"print_wt_ave") == 0) {
if (top <= 0) fatal_error("rpn: print_wt_ave needs two fields","");
j = top - 1;
sum1 = sum2 = 0.0;
// find mean values
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
sum1 += stack[j][i]*stack[top][i];
sum2 += stack[top][i];
}
}
if (sum2 != 0.0) sum1 = sum1 / sum2;
sprintf(inv_out,"%srpn_wt_ave=%g",item_deliminator,sum1);
}
// print_corr: prints cosine(lat) weighted spatial correlation
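// i.e. corr = sum(w*(x-xbar)*(y-ybar)) / sqrt( sum(w*(x-xbar)^2) * sum(w*(y-ybar)^2) ),
// where w = cos(lat) when latitudes are available and w = 1 otherwise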
else if (strcmp(string,"print_corr") == 0) {
if (top <= 0) fatal_error("rpn: print_corr needs two fields","");
// if (lat == NULL) fatal_error("rpn: print_corr .. no latitudes defined","");
j = top - 1;
sum1 = sum2 = wt = 0.0;
last_lat = 0;
cos_lat = 1.0;
sq1 = sq2 = sq12 = 0.0;
if (lat != NULL) {
// find mean values
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (last_lat != lat[i]) {
cos_lat = cos(lat[i]*M_PI/180.0);
last_lat = lat[i];
}
sum1 += stack[top][i] * cos_lat;
sum2 += stack[j][i] * cos_lat;
wt += cos_lat;
}
}
sum1 = sum1 / wt;
sum2 = sum2 / wt;
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
if (last_lat != lat[i]) {
cos_lat = cos(lat[i]*M_PI/180.0);
last_lat = lat[i];
}
sq1 += (stack[top][i]-sum1)*(stack[top][i]-sum1)*cos_lat;
sq2 += (stack[j][i]-sum2)*(stack[j][i]-sum2)*cos_lat;
sq12 += (stack[top][i]-sum1)*(stack[j][i]-sum2)*cos_lat;
}
}
}
else {
// find mean values
#pragma omp parallel for private(i) reduction(+:wt,sum1,sum2)
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
sum1 += stack[top][i];
sum2 += stack[j][i];
wt += 1.0;
}
}
sum1 = sum1 / wt;
sum2 = sum2 / wt;
#pragma omp parallel for private(i) reduction(+:sq1,sq2,sq12)
for (i = 0; i < ndata; i++) {
if (DEFINED_VAL(stack[top][i]) && DEFINED_VAL(stack[j][i])) {
sq1 += (stack[top][i]-sum1)*(stack[top][i]-sum1);
sq2 += (stack[j][i]-sum2)*(stack[j][i]-sum2);
sq12 += (stack[top][i]-sum1)*(stack[j][i]-sum2);
}
}
}
sq1 = sq1 / wt;
sq2 = sq2 / wt;
sq12 = sq12 / wt;
if (sq1*sq2 == 0.0) sprintf(inv_out,"%srpn_corr=%g",item_deliminator,1.0);
else sprintf(inv_out,"%srpn_corr=%g",item_deliminator, sq12/sqrt(sq1*sq2));
inv_out += strlen(inv_out);
}
// number: stack(++top) = number
else if (string[0] == '+' || string[0] == '-' || string[0] == '.' || isdigit((unsigned char) string[0])) {
f = atof(string);
top = push(top,ndata,SCALAR,f,NULL,NULL);
if (mode == 98) fprintf(stderr," constant=%f", f);
}
else fatal_error("rpn: unidentified symbol %s", string);
if (mode == 98) fprintf(stderr," top=%d\n", top);
}
if (*p != 0) fatal_error("-rpn didn't find operator or value before %s",p);
if (top >= 0) {
for (i = 0; i < ndata; i++) {
data[i] = stack[top][i];
}
}
else fatal_error("rpn: stack empty","");
// free stack
for (i = 0; i < STACK_SIZE; i++) {
    free(stack[i]);
    stack[i] = NULL;    /* push() checks for NULL before reallocating */
}
return 0;
}
int push(int top, unsigned int ndata, int type, float f, float *ff, double *d) {
unsigned int i;
if (++top == STACK_SIZE) fatal_error_i("rpn: push: stack overflow %d",top);
if (stack[top] == NULL) {
stack[top] = (float *) malloc(sizeof(float) * (size_t) ndata);
if (stack[top] == NULL) fatal_error("rpn: push: memory allocation","");
}
if (type == SCALAR) {
for (i = 0; i < ndata; i++) stack[top][i] = f;
}
else if (type == VECTOR) {
for (i = 0; i < ndata; i++) stack[top][i] = ff[i];
}
else if (type == DBL_VEC) {
for (i = 0; i < ndata; i++) stack[top][i] = (float) d[i];
}
return top;
}
/*
* HEADER:100:if_reg:misc:1:if rpn registers defined, X = A, A:B, A:B:C, etc A = register number
*/
int f_if_reg(ARG1) {
int i, j, *list;
const char *p;
if (mode == -1) {
// figure out the number of arguments
i = 1;
p = arg1;
while (*p) {
if (*p++ == ':') i++;
}
*local = list = (int *) calloc(i+1, sizeof (int));
if (list == NULL) fatal_error("if_reg: memory allocation failed","");
list[0] = i;
p = arg1;
for (j = 1; j <= i; j++) {
list[j] = atoi(p);
if (list[j] >= N_RPN_REGS || list[j] < 0) fatal_error_i("if_reg: bad register %d", list[j]);
while (isdigit((unsigned char) *p)) p++;
if (*p == ':') p++;
}
}
else if (mode == -2) {
list = (int *) *local;
free(list);
}
else if (mode >= 0) {
list = (int *) *local;
i = list[0];
match_flag = 0;
for (j=1; j <= i; j++) {
if (rpn_n[list[j]] == 0) match_flag = 1;
}
}
return 0;
}
/*
* HEADER:100:rpn_rcl:misc:1:data = register X .. same as -rpn rcl_X .. no geolocation calc needed
*/
int f_rpn_rcl(ARG1) {
int reg;
if (mode == -1) {
decode = 1;
}
else if (mode >= 0) {
reg = atoi(arg1);
if (reg < 0 || reg >= N_RPN_REGS) fatal_error_i("rpn_rcl: bad register %d", reg);
if (ndata != rpn_n[reg]) fatal_error("rpn_rcl: size mismatch","");
use_scale = 0;
memcpy(data, rpn_data[reg], ndata * sizeof(float));
}
return 0;
}
/*
* HEADER:100:rpn_sto:misc:1:register X = data.. same as -rpn sto_X .. no geolocation calc needed
*/
int f_rpn_sto(ARG1) {
int reg;
if (mode == -1) {
decode = 1;
}
else if (mode >= 0) {
reg = atoi(arg1);
if (reg < 0 || reg >= N_RPN_REGS) fatal_error_i("rpn_sto: bad register %d", reg);
if (ndata != rpn_n[reg]) {
if (rpn_n[reg] != 0) free(rpn_data[reg]);
rpn_data[reg] = (float *) malloc(sizeof(float) * (size_t) ndata);
if (rpn_data[reg] == NULL) {
rpn_n[reg] = 0;
fatal_error("rpn_sto: memory allocation","");
}
rpn_n[reg] = ndata;
}
memcpy(rpn_data[reg], data, ndata * sizeof(float));
}
return 0;
}
static void gbl_wt(double *sum, double *wt, float *data, int i, int j, int nx, int ny, double wt0) {
float t;
i = (i == -1) ? nx-1 : i;
i = (i == nx) ? 0 : i;
if (i < 0 || i >= nx || j < 0 || j >= ny) return;
t = data[i + j*nx];
if (UNDEFINED_VAL(t)) return;
*wt = *wt + wt0;
*sum = *sum + t*wt0;
return;
}
static void reg_wt(double *sum, double *wt, float *data, int i, int j, int nx, int ny, double wt0) {
float t;
if (i < 0 || i >= nx || j < 0 || j >= ny) return;
t = data[i + j*nx];
if (UNDEFINED_VAL(t)) return;
*wt = *wt + wt0;
*sum = *sum + t*wt0;
return;
}
/*
* routines to allow code to get or set various RPN registers
*
* for example: read field, save in reg_0
* allocate array to match read grid dimensions
* copy reg_0 to array
*/
size_t wgrib2_get_reg_size(int reg) {
if (reg < 0 || reg >= N_RPN_REGS) return 0;
return rpn_n[reg];
}
int wgrib2_get_reg_data(float *data, size_t size, int reg) {
if (reg < 0 || reg >= N_RPN_REGS) return 1;
if (rpn_n[reg] != size) return 2;
memcpy(data, rpn_data[reg], sizeof(float) * (size_t) size);
return 0;
}
int wgrib2_set_reg(float *data, size_t size, int reg) {
if (reg < 0 || reg >= N_RPN_REGS) return 1;
if (rpn_n[reg] != size) {
if (rpn_data[reg] != NULL) free(rpn_data[reg]);
rpn_n[reg] = 0;
rpn_data[reg] = (float *) malloc(sizeof(float) * (size_t) size);
if (rpn_data[reg] == NULL) return 2;
rpn_n[reg] = size;
}
memcpy(rpn_data[reg], data, size * sizeof(float));
return 0;
}
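/*
 * Usage sketch (illustrative only, not part of wgrib2): copy RPN register 0
 * into a freshly allocated array, following the outline in the comment above.
 * The function name example_copy_reg0 is an assumption for illustration; it
 * assumes malloc()/free() are available, as they are elsewhere in this file.
 */
static int example_copy_reg0(float **out, size_t *out_size) {
    size_t n = wgrib2_get_reg_size(0);
    if (n == 0) return 1;                        /* register 0 is not defined */
    float *buf = (float *) malloc(sizeof(float) * n);
    if (buf == NULL) return 2;                   /* allocation failed */
    if (wgrib2_get_reg_data(buf, n, 0) != 0) {   /* size or register mismatch */
        free(buf);
        return 3;
    }
    *out = buf;
    *out_size = n;
    return 0;
}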
|
_kdtree_core.c | /*
pykdtree, Fast kd-tree implementation with OpenMP-enabled queries
Copyright (C) 2013 - present Esben S. Nielsen
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
This kd-tree implementation is based on the scipy.spatial.cKDTree by
Anne M. Archibald and libANN by David M. Mount and Sunil Arya.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#define PA(i,d) (pa[no_dims * pidx[i] + d])
#define PASWAP(a,b) { uint32_t tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; }
#ifdef _MSC_VER
#define restrict __restrict
#endif
typedef struct
{
float cut_val;
int8_t cut_dim;
uint32_t start_idx;
uint32_t n;
float cut_bounds_lv;
float cut_bounds_hv;
struct Node_float *left_child;
struct Node_float *right_child;
} Node_float;
typedef struct
{
float *bbox;
int8_t no_dims;
uint32_t *pidx;
struct Node_float *root;
} Tree_float;
typedef struct
{
double cut_val;
int8_t cut_dim;
uint32_t start_idx;
uint32_t n;
double cut_bounds_lv;
double cut_bounds_hv;
struct Node_double *left_child;
struct Node_double *right_child;
} Node_double;
typedef struct
{
double *bbox;
int8_t no_dims;
uint32_t *pidx;
struct Node_double *root;
} Tree_double;
void insert_point_float(uint32_t *closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k);
void get_bounding_box_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, float *bbox);
int partition_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t *cut_dim,
float *cut_val, uint32_t *n_lo);
Tree_float* construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp);
Node_float* construct_subtree_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox);
Node_float * create_node_float(uint32_t start_idx, uint32_t n, int is_leaf);
void delete_subtree_float(Node_float *root);
void delete_tree_float(Tree_float *tree);
void print_tree_float(Node_float *root, int level);
float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims);
float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox);
float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox);
void search_leaf_float(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord,
uint32_t k, uint32_t *restrict closest_idx, float *restrict closest_dist);
void search_leaf_float_mask(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord,
uint32_t k, uint8_t *restrict mask, uint32_t *restrict closest_idx, float *restrict closest_dist);
void search_splitnode_float(Node_float *root, float *pa, uint32_t *pidx, int8_t no_dims, float *point_coord,
float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t *mask, uint32_t * closest_idx, float *closest_dist);
void search_tree_float(Tree_float *tree, float *pa, float *point_coords,
uint32_t num_points, uint32_t k, float distance_upper_bound,
float eps, uint8_t *mask, uint32_t *closest_idxs, float *closest_dists);
void insert_point_double(uint32_t *closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k);
void get_bounding_box_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, double *bbox);
int partition_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *bbox, int8_t *cut_dim,
double *cut_val, uint32_t *n_lo);
Tree_double* construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp);
Node_double* construct_subtree_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox);
Node_double * create_node_double(uint32_t start_idx, uint32_t n, int is_leaf);
void delete_subtree_double(Node_double *root);
void delete_tree_double(Tree_double *tree);
void print_tree_double(Node_double *root, int level);
double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims);
double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox);
double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox);
void search_leaf_double(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord,
uint32_t k, uint32_t *restrict closest_idx, double *restrict closest_dist);
void search_leaf_double_mask(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord,
uint32_t k, uint8_t *restrict mask, uint32_t *restrict closest_idx, double *restrict closest_dist);
void search_splitnode_double(Node_double *root, double *pa, uint32_t *pidx, int8_t no_dims, double *point_coord,
double min_dist, uint32_t k, double distance_upper_bound, double eps_fac, uint8_t *mask, uint32_t * closest_idx, double *closest_dist);
void search_tree_double(Tree_double *tree, double *pa, double *point_coords,
uint32_t num_points, uint32_t k, double distance_upper_bound,
double eps, uint8_t *mask, uint32_t *closest_idxs, double *closest_dists);
/************************************************
Insert point into priority queue
Params:
closest_idx : index queue
closest_dist : distance queue
pidx : permutation index of data points
cur_dist : distance to point inserted
k : number of neighbours
************************************************/
void insert_point_float(uint32_t *closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k)
{
int i;
for (i = k - 1; i > 0; i--)
{
if (closest_dist[i - 1] > cur_dist)
{
closest_dist[i] = closest_dist[i - 1];
closest_idx[i] = closest_idx[i - 1];
}
else
{
break;
}
}
closest_idx[i] = pidx;
closest_dist[i] = cur_dist;
}
/************************************************
Get the bounding box of a set of points
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
n : number of points
bbox : bounding box (return)
************************************************/
void get_bounding_box_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, float *bbox)
{
float cur;
int8_t bbox_idx, i, j;
uint32_t i2;
/* Use first data point to initialize */
for (i = 0; i < no_dims; i++)
{
bbox[2 * i] = bbox[2 * i + 1] = PA(0, i);
}
/* Update using rest of data points */
for (i2 = 1; i2 < n; i2++)
{
for (j = 0; j < no_dims; j++)
{
bbox_idx = 2 * j;
cur = PA(i2, j);
if (cur < bbox[bbox_idx])
{
bbox[bbox_idx] = cur;
}
else if (cur > bbox[bbox_idx + 1])
{
bbox[bbox_idx + 1] = cur;
}
}
}
}
/************************************************
Partition a range of data points by manipulating the permutation index.
The sliding midpoint rule is used for the partitioning.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bbox : bounding box of data points
cut_dim : dimension used for partition (return)
cut_val : value of cutting point (return)
n_lo : number of points below cutting plane (return)
************************************************/
int partition_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t *cut_dim, float *cut_val, uint32_t *n_lo)
{
int8_t dim = 0, i;
uint32_t p, q, i2;
float size = 0, min_val, max_val, split, side_len, cur_val;
uint32_t end_idx = start_idx + n - 1;
/* Find largest bounding box side */
for (i = 0; i < no_dims; i++)
{
side_len = bbox[2 * i + 1] - bbox[2 * i];
if (side_len > size)
{
dim = i;
size = side_len;
}
}
min_val = bbox[2 * dim];
max_val = bbox[2 * dim + 1];
/* Check for zero length or inconsistent */
if (min_val >= max_val)
return 1;
/* Use middle for splitting */
split = (min_val + max_val) / 2;
/* Partition all data points around middle */
p = start_idx;
q = end_idx;
while (p <= q)
{
if (PA(p, dim) < split)
{
p++;
}
else if (PA(q, dim) >= split)
{
/* Guard for underflow */
if (q > 0)
{
q--;
}
else
{
break;
}
}
else
{
PASWAP(p, q);
p++;
q--;
}
}
/* Check for empty splits */
if (p == start_idx)
{
/* No points less than split.
Split at lowest point instead.
Minimum 1 point will be in lower box.
*/
uint32_t j = start_idx;
split = PA(j, dim);
for (i2 = start_idx + 1; i2 <= end_idx; i2++)
{
/* Find lowest point */
cur_val = PA(i2, dim);
if (cur_val < split)
{
j = i2;
split = cur_val;
}
}
PASWAP(j, start_idx);
p = start_idx + 1;
}
else if (p == end_idx + 1)
{
/* No points greater than split.
Split at highest point instead.
Minimum 1 point will be in higher box.
*/
uint32_t j = end_idx;
split = PA(j, dim);
for (i2 = start_idx; i2 < end_idx; i2++)
{
/* Find highest point */
cur_val = PA(i2, dim);
if (cur_val > split)
{
j = i2;
split = cur_val;
}
}
PASWAP(j, end_idx);
p = end_idx;
}
/* Set return values */
*cut_dim = dim;
*cut_val = split;
*n_lo = p - start_idx;
return 0;
}
/************************************************
Construct a sub tree over a range of data points.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bsp : number of points per leaf
bbox : bounding box of set of data points
************************************************/
Node_float* construct_subtree_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox)
{
/* Create new node */
int is_leaf = (n <= bsp);
Node_float *root = create_node_float(start_idx, n, is_leaf);
int rval;
int8_t cut_dim;
uint32_t n_lo;
float cut_val, lv, hv;
if (is_leaf)
{
/* Make leaf node */
root->cut_dim = -1;
}
else
{
/* Make split node */
/* Partition data set and set node info */
rval = partition_float(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo);
if (rval == 1)
{
root->cut_dim = -1;
return root;
}
root->cut_val = cut_val;
root->cut_dim = cut_dim;
/* Recurse on both subsets */
lv = bbox[2 * cut_dim];
hv = bbox[2 * cut_dim + 1];
/* Set bounds for cut dimension */
root->cut_bounds_lv = lv;
root->cut_bounds_hv = hv;
/* Update bounding box before call to lower subset and restore after */
bbox[2 * cut_dim + 1] = cut_val;
root->left_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox);
bbox[2 * cut_dim + 1] = hv;
/* Update bounding box before call to higher subset and restore after */
bbox[2 * cut_dim] = cut_val;
root->right_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox);
bbox[2 * cut_dim] = lv;
}
return root;
}
/************************************************
Construct a tree over data points.
Params:
pa : data points
no_dims: number of dimensions
n : number of data points
bsp : number of points per leaf
************************************************/
Tree_float* construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp)
{
Tree_float *tree = (Tree_float *)malloc(sizeof(Tree_float));
uint32_t i;
uint32_t *pidx;
float *bbox;
tree->no_dims = no_dims;
/* Initialize permutation array */
pidx = (uint32_t *)malloc(sizeof(uint32_t) * n);
for (i = 0; i < n; i++)
{
pidx[i] = i;
}
bbox = (float *)malloc(2 * sizeof(float) * no_dims);
get_bounding_box_float(pa, pidx, no_dims, n, bbox);
tree->bbox = bbox;
/* Construct subtree on full dataset */
tree->root = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, 0, n, bsp, bbox);
tree->pidx = pidx;
return tree;
}
/************************************************
Create a tree node.
Params:
start_idx : index of first data point to use
n : number of data points
************************************************/
Node_float* create_node_float(uint32_t start_idx, uint32_t n, int is_leaf)
{
Node_float *new_node;
if (is_leaf)
{
/*
Allocate only the part of the struct that will be used in a leaf node.
This relies on the C99 specification of struct layout conservation and padding and
that dereferencing is never attempted for the node pointers in a leaf.
*/
new_node = (Node_float *)malloc(sizeof(Node_float) - 2 * sizeof(Node_float *));
}
else
{
new_node = (Node_float *)malloc(sizeof(Node_float));
}
new_node->n = n;
new_node->start_idx = start_idx;
return new_node;
}
/************************************************
Delete subtree
Params:
root : root node of subtree to delete
************************************************/
void delete_subtree_float(Node_float *root)
{
if (root->cut_dim != -1)
{
delete_subtree_float((Node_float *)root->left_child);
delete_subtree_float((Node_float *)root->right_child);
}
free(root);
}
/************************************************
Delete tree
Params:
tree : Tree struct of kd tree
************************************************/
void delete_tree_float(Tree_float *tree)
{
delete_subtree_float((Node_float *)tree->root);
free(tree->bbox);
free(tree->pidx);
free(tree);
}
/************************************************
Print
************************************************/
void print_tree_float(Node_float *root, int level)
{
int i;
for (i = 0; i < level; i++)
{
printf(" ");
}
printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim);
if (root->cut_dim != -1)
print_tree_float((Node_float *)root->left_child, level + 1);
if (root->cut_dim != -1)
print_tree_float((Node_float *)root->right_child, level + 1);
}
/************************************************
Calculate squared cartesian distance between points
Params:
point1_coord : point 1
point2_coord : point 2
************************************************/
float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims)
{
/* Calculate squared distance */
float dist = 0, dim_dist;
int8_t i;
for (i = 0; i < no_dims; i++)
{
dim_dist = point2_coord[i] - point1_coord[i];
dist += dim_dist * dim_dist;
}
return dist;
}
/************************************************
Get squared distance from point to cube in specified dimension
Params:
dim : dimension
point_coord : cartesian coordinates of point
bbox : cube
************************************************/
float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox)
{
float dim_coord = point_coord[dim];
if (dim_coord < bbox[2 * dim])
{
/* Left of cube in dimension */
return dim_coord - bbox[2 * dim];
}
else if (dim_coord > bbox[2 * dim + 1])
{
/* Right of cube in dimension */
return dim_coord - bbox[2 * dim + 1];
}
else
{
/* Inside cube in dimension */
return 0.;
}
}
/************************************************
Get minimum squared distance between point and cube.
Params:
point_coord : cartesian coordinates of point
no_dims : number of dimensions
bbox : cube
************************************************/
float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox)
{
float cube_offset = 0, cube_offset_dim;
int8_t i;
for (i = 0; i < no_dims; i++)
{
cube_offset_dim = get_cube_offset_float(i, point_coord, bbox);
cube_offset += cube_offset_dim * cube_offset_dim;
}
return cube_offset;
}
/************************************************
Search a leaf node for closest point
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_leaf_float(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord,
uint32_t k, uint32_t *restrict closest_idx, float *restrict closest_dist)
{
float cur_dist;
uint32_t i;
/* Loop through all points in leaf */
for (i = 0; i < n; i++)
{
/* Get distance to query point */
cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims);
/* Update closest info if new point is closest so far*/
if (cur_dist < closest_dist[k - 1])
{
insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k);
}
}
}
/************************************************
Search a leaf node for closest point with data point mask
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_leaf_float_mask(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord,
uint32_t k, uint8_t *mask, uint32_t *restrict closest_idx, float *restrict closest_dist)
{
float cur_dist;
uint32_t i;
/* Loop through all points in leaf */
for (i = 0; i < n; i++)
{
/* Is this point masked out? */
if (mask[pidx[start_idx + i]])
{
continue;
}
/* Get distance to query point */
cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims);
/* Update closest info if new point is closest so far*/
if (cur_dist < closest_dist[k - 1])
{
insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k);
}
}
}
/************************************************
Search subtree for nearest to query point
Params:
root : root node of subtree
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
point_coord : query point
min_dist : minimum distance to nearest neighbour
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_splitnode_float(Node_float *root, float *pa, uint32_t *pidx, int8_t no_dims, float *point_coord,
float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t *mask,
uint32_t *closest_idx, float *closest_dist)
{
int8_t dim;
float dist_left, dist_right;
float new_offset;
float box_diff;
/* Skip if distance bound exceeded */
if (min_dist > distance_upper_bound)
{
return;
}
dim = root->cut_dim;
/* Handle leaf node */
if (dim == -1)
{
if (mask)
{
search_leaf_float_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist);
}
else
{
search_leaf_float(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist);
}
return;
}
/* Get distance to cutting plane */
new_offset = point_coord[dim] - root->cut_val;
if (new_offset < 0)
{
/* Left of cutting plane */
dist_left = min_dist;
if (dist_left < closest_dist[k - 1] * eps_fac)
{
/* Search left subtree if minimum distance is below limit */
search_splitnode_float((Node_float *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
/* Right of cutting plane. Update minimum distance.
See Algorithms for Fast Vector Quantization
Sunil Arya and David M. Mount. */
box_diff = root->cut_bounds_lv - point_coord[dim];
if (box_diff < 0)
{
box_diff = 0;
}
dist_right = min_dist - box_diff * box_diff + new_offset * new_offset;
if (dist_right < closest_dist[k - 1] * eps_fac)
{
/* Search right subtree if minimum distance is below limit*/
search_splitnode_float((Node_float *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
}
else
{
/* Right of cutting plane */
dist_right = min_dist;
if (dist_right < closest_dist[k - 1] * eps_fac)
{
/* Search right subtree if minimum distance is below limit*/
search_splitnode_float((Node_float *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
/* Left of cutting plane. Update minimum distance.
See Algorithms for Fast Vector Quantization
Sunil Arya and David M. Mount. */
box_diff = point_coord[dim] - root->cut_bounds_hv;
if (box_diff < 0)
{
box_diff = 0;
}
dist_left = min_dist - box_diff * box_diff + new_offset * new_offset;
if (dist_left < closest_dist[k - 1] * eps_fac)
{
/* Search left subtree if minimum distance is below limit*/
search_splitnode_float((Node_float *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
}
}
/************************************************
Search for nearest neighbour for a set of query points
Params:
tree : Tree struct of kd tree
pa : data points
pidx : permutation index of data points
point_coords : query points
num_points : number of query points
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_tree_float(Tree_float *tree, float *pa, float *point_coords,
uint32_t num_points, uint32_t k, float distance_upper_bound,
float eps, uint8_t *mask, uint32_t *closest_idxs, float *closest_dists)
{
float min_dist;
float eps_fac = 1 / ((1 + eps) * (1 + eps));
int8_t no_dims = tree->no_dims;
float *bbox = tree->bbox;
uint32_t *pidx = tree->pidx;
uint32_t j = 0;
#if defined(_MSC_VER) && defined(_OPENMP)
int32_t i = 0;
int32_t local_num_points = (int32_t) num_points;
#else
uint32_t i;
uint32_t local_num_points = num_points;
#endif
Node_float *root = (Node_float *)tree->root;
/* Queries are OpenMP enabled */
#pragma omp parallel
{
/* The low chunk size is important to avoid L2 cache thrashing
for spatially coherent query datasets
*/
#pragma omp for private(i, j) schedule(static, 100) nowait
for (i = 0; i < local_num_points; i++)
{
for (j = 0; j < k; j++)
{
closest_idxs[i * k + j] = UINT32_MAX;
closest_dists[i * k + j] = DBL_MAX;
}
min_dist = get_min_dist_float(point_coords + no_dims * i, no_dims, bbox);
search_splitnode_float(root, pa, pidx, no_dims, point_coords + no_dims * i, min_dist,
k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]);
}
}
}
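/************************************************
Usage sketch (illustrative only, not part of pykdtree): build a tree over
n two-dimensional float points and query the nearest neighbour of a single
point. The function name example_query_float and the choice of 16 points
per leaf are assumptions for illustration; returned distances are squared.
************************************************/
static void example_query_float(float *pa, uint32_t n)
{
    uint32_t closest_idx;
    float closest_dist;
    float query[2] = {0.0f, 0.0f};
    /* Build the tree: 2 dimensions, n points, at most 16 points per leaf */
    Tree_float *tree = construct_tree_float(pa, 2, n, 16);
    /* One query point, k = 1 neighbour, no distance bound, exact search, no mask */
    search_tree_float(tree, pa, query, 1, 1, FLT_MAX, 0.0f, NULL,
                      &closest_idx, &closest_dist);
    printf("nearest index %u, squared distance %f\n", closest_idx, closest_dist);
    delete_tree_float(tree);
}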
/************************************************
Insert point into priority queue
Params:
closest_idx : index queue
closest_dist : distance queue
pidx : permutation index of data points
cur_dist : distance to point inserted
k : number of neighbours
************************************************/
void insert_point_double(uint32_t *closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k)
{
int i;
for (i = k - 1; i > 0; i--)
{
if (closest_dist[i - 1] > cur_dist)
{
closest_dist[i] = closest_dist[i - 1];
closest_idx[i] = closest_idx[i - 1];
}
else
{
break;
}
}
closest_idx[i] = pidx;
closest_dist[i] = cur_dist;
}
/************************************************
Get the bounding box of a set of points
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
n : number of points
bbox : bounding box (return)
************************************************/
void get_bounding_box_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, double *bbox)
{
double cur;
int8_t bbox_idx, i, j;
uint32_t i2;
/* Use first data point to initialize */
for (i = 0; i < no_dims; i++)
{
bbox[2 * i] = bbox[2 * i + 1] = PA(0, i);
}
/* Update using rest of data points */
for (i2 = 1; i2 < n; i2++)
{
for (j = 0; j < no_dims; j++)
{
bbox_idx = 2 * j;
cur = PA(i2, j);
if (cur < bbox[bbox_idx])
{
bbox[bbox_idx] = cur;
}
else if (cur > bbox[bbox_idx + 1])
{
bbox[bbox_idx + 1] = cur;
}
}
}
}
/************************************************
Partition a range of data points by manipulating the permutation index.
The sliding midpoint rule is used for the partitioning.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bbox : bounding box of data points
cut_dim : dimension used for partition (return)
cut_val : value of cutting point (return)
n_lo : number of points below cutting plane (return)
************************************************/
int partition_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *bbox, int8_t *cut_dim, double *cut_val, uint32_t *n_lo)
{
int8_t dim = 0, i;
uint32_t p, q, i2;
double size = 0, min_val, max_val, split, side_len, cur_val;
uint32_t end_idx = start_idx + n - 1;
/* Find largest bounding box side */
for (i = 0; i < no_dims; i++)
{
side_len = bbox[2 * i + 1] - bbox[2 * i];
if (side_len > size)
{
dim = i;
size = side_len;
}
}
min_val = bbox[2 * dim];
max_val = bbox[2 * dim + 1];
/* Check for zero length or inconsistent */
if (min_val >= max_val)
return 1;
/* Use middle for splitting */
split = (min_val + max_val) / 2;
/* Partition all data points around middle */
p = start_idx;
q = end_idx;
while (p <= q)
{
if (PA(p, dim) < split)
{
p++;
}
else if (PA(q, dim) >= split)
{
/* Guard for underflow */
if (q > 0)
{
q--;
}
else
{
break;
}
}
else
{
PASWAP(p, q);
p++;
q--;
}
}
/* Check for empty splits */
if (p == start_idx)
{
/* No points less than split.
Split at lowest point instead.
Minimum 1 point will be in lower box.
*/
uint32_t j = start_idx;
split = PA(j, dim);
for (i2 = start_idx + 1; i2 <= end_idx; i2++)
{
/* Find lowest point */
cur_val = PA(i2, dim);
if (cur_val < split)
{
j = i2;
split = cur_val;
}
}
PASWAP(j, start_idx);
p = start_idx + 1;
}
else if (p == end_idx + 1)
{
/* No points greater than split.
Split at highest point instead.
Minimum 1 point will be in higher box.
*/
uint32_t j = end_idx;
split = PA(j, dim);
for (i2 = start_idx; i2 < end_idx; i2++)
{
/* Find highest point */
cur_val = PA(i2, dim);
if (cur_val > split)
{
j = i2;
split = cur_val;
}
}
PASWAP(j, end_idx);
p = end_idx;
}
/* Set return values */
*cut_dim = dim;
*cut_val = split;
*n_lo = p - start_idx;
return 0;
}
/************************************************
Construct a sub tree over a range of data points.
Params:
pa : data points
pidx : permutation index of data points
no_dims: number of dimensions
start_idx : index of first data point to use
n : number of data points
bsp : number of points per leaf
bbox : bounding box of set of data points
************************************************/
Node_double* construct_subtree_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox)
{
/* Create new node */
int is_leaf = (n <= bsp);
Node_double *root = create_node_double(start_idx, n, is_leaf);
int rval;
int8_t cut_dim;
uint32_t n_lo;
double cut_val, lv, hv;
if (is_leaf)
{
/* Make leaf node */
root->cut_dim = -1;
}
else
{
/* Make split node */
/* Partition data set and set node info */
rval = partition_double(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo);
if (rval == 1)
{
root->cut_dim = -1;
return root;
}
root->cut_val = cut_val;
root->cut_dim = cut_dim;
/* Recurse on both subsets */
lv = bbox[2 * cut_dim];
hv = bbox[2 * cut_dim + 1];
/* Set bounds for cut dimension */
root->cut_bounds_lv = lv;
root->cut_bounds_hv = hv;
/* Update bounding box before call to lower subset and restore after */
bbox[2 * cut_dim + 1] = cut_val;
root->left_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox);
bbox[2 * cut_dim + 1] = hv;
/* Update bounding box before call to higher subset and restore after */
bbox[2 * cut_dim] = cut_val;
root->right_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox);
bbox[2 * cut_dim] = lv;
}
return root;
}
/************************************************
Construct a tree over data points.
Params:
pa : data points
no_dims: number of dimensions
n : number of data points
bsp : number of points per leaf
************************************************/
Tree_double* construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp)
{
Tree_double *tree = (Tree_double *)malloc(sizeof(Tree_double));
uint32_t i;
uint32_t *pidx;
double *bbox;
tree->no_dims = no_dims;
/* Initialize permutation array */
pidx = (uint32_t *)malloc(sizeof(uint32_t) * n);
for (i = 0; i < n; i++)
{
pidx[i] = i;
}
bbox = (double *)malloc(2 * sizeof(double) * no_dims);
get_bounding_box_double(pa, pidx, no_dims, n, bbox);
tree->bbox = bbox;
/* Construct subtree on full dataset */
tree->root = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, 0, n, bsp, bbox);
tree->pidx = pidx;
return tree;
}
/************************************************
Create a tree node.
Params:
start_idx : index of first data point to use
n : number of data points
************************************************/
Node_double* create_node_double(uint32_t start_idx, uint32_t n, int is_leaf)
{
Node_double *new_node;
if (is_leaf)
{
/*
Allocate only the part of the struct that will be used in a leaf node.
This relies on the C99 specification of struct layout conservation and padding and
that dereferencing is never attempted for the node pointers in a leaf.
*/
new_node = (Node_double *)malloc(sizeof(Node_double) - 2 * sizeof(Node_double *));
}
else
{
new_node = (Node_double *)malloc(sizeof(Node_double));
}
new_node->n = n;
new_node->start_idx = start_idx;
return new_node;
}
/************************************************
Delete subtree
Params:
root : root node of subtree to delete
************************************************/
void delete_subtree_double(Node_double *root)
{
if (root->cut_dim != -1)
{
delete_subtree_double((Node_double *)root->left_child);
delete_subtree_double((Node_double *)root->right_child);
}
free(root);
}
/************************************************
Delete tree
Params:
tree : Tree struct of kd tree
************************************************/
void delete_tree_double(Tree_double *tree)
{
delete_subtree_double((Node_double *)tree->root);
free(tree->bbox);
free(tree->pidx);
free(tree);
}
/************************************************
Print
************************************************/
void print_tree_double(Node_double *root, int level)
{
int i;
for (i = 0; i < level; i++)
{
printf(" ");
}
printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim);
if (root->cut_dim != -1)
print_tree_double((Node_double *)root->left_child, level + 1);
if (root->cut_dim != -1)
print_tree_double((Node_double *)root->right_child, level + 1);
}
/************************************************
Calculate squared cartesian distance between points
Params:
point1_coord : point 1
point2_coord : point 2
************************************************/
double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims)
{
/* Calculate squared distance */
double dist = 0, dim_dist;
int8_t i;
for (i = 0; i < no_dims; i++)
{
dim_dist = point2_coord[i] - point1_coord[i];
dist += dim_dist * dim_dist;
}
return dist;
}
/************************************************
Get squared distance from point to cube in specified dimension
Params:
dim : dimension
point_coord : cartesian coordinates of point
bbox : cube
************************************************/
double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox)
{
double dim_coord = point_coord[dim];
if (dim_coord < bbox[2 * dim])
{
/* Left of cube in dimension */
return dim_coord - bbox[2 * dim];
}
else if (dim_coord > bbox[2 * dim + 1])
{
/* Right of cube in dimension */
return dim_coord - bbox[2 * dim + 1];
}
else
{
/* Inside cube in dimension */
return 0.;
}
}
/************************************************
Get minimum squared distance between point and cube.
Params:
point_coord : cartesian coordinates of point
no_dims : number of dimensions
bbox : cube
************************************************/
double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox)
{
double cube_offset = 0, cube_offset_dim;
int8_t i;
for (i = 0; i < no_dims; i++)
{
cube_offset_dim = get_cube_offset_double(i, point_coord, bbox);
cube_offset += cube_offset_dim * cube_offset_dim;
}
return cube_offset;
}
/************************************************
Search a leaf node for closest point
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_leaf_double(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord,
uint32_t k, uint32_t *restrict closest_idx, double *restrict closest_dist)
{
double cur_dist;
uint32_t i;
/* Loop through all points in leaf */
for (i = 0; i < n; i++)
{
/* Get distance to query point */
cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims);
/* Update closest info if new point is closest so far*/
if (cur_dist < closest_dist[k - 1])
{
insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k);
}
}
}
/************************************************
Search a leaf node for closest point with data point mask
Params:
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
start_idx : index of first data point to use
size : number of data points
point_coord : query point
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_leaf_double_mask(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord,
uint32_t k, uint8_t *mask, uint32_t *restrict closest_idx, double *restrict closest_dist)
{
double cur_dist;
uint32_t i;
/* Loop through all points in leaf */
for (i = 0; i < n; i++)
{
/* Is this point masked out? */
if (mask[pidx[start_idx + i]])
{
continue;
}
/* Get distance to query point */
cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims);
/* Update closest info if new point is closest so far*/
if (cur_dist < closest_dist[k - 1])
{
insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k);
}
}
}
/************************************************
Search subtree for nearest to query point
Params:
root : root node of subtree
pa : data points
pidx : permutation index of data points
no_dims : number of dimensions
point_coord : query point
min_dist : minimum distance to nearest neighbour
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_splitnode_double(Node_double *root, double *pa, uint32_t *pidx, int8_t no_dims, double *point_coord,
double min_dist, uint32_t k, double distance_upper_bound, double eps_fac, uint8_t *mask,
uint32_t *closest_idx, double *closest_dist)
{
int8_t dim;
double dist_left, dist_right;
double new_offset;
double box_diff;
/* Skip if distance bound exceeded */
if (min_dist > distance_upper_bound)
{
return;
}
dim = root->cut_dim;
/* Handle leaf node */
if (dim == -1)
{
if (mask)
{
search_leaf_double_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist);
}
else
{
search_leaf_double(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist);
}
return;
}
/* Get distance to cutting plane */
new_offset = point_coord[dim] - root->cut_val;
if (new_offset < 0)
{
/* Left of cutting plane */
dist_left = min_dist;
if (dist_left < closest_dist[k - 1] * eps_fac)
{
/* Search left subtree if minimum distance is below limit */
search_splitnode_double((Node_double *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
/* Right of cutting plane. Update minimum distance.
See Algorithms for Fast Vector Quantization
Sunil Arya and David M. Mount. */
box_diff = root->cut_bounds_lv - point_coord[dim];
if (box_diff < 0)
{
box_diff = 0;
}
dist_right = min_dist - box_diff * box_diff + new_offset * new_offset;
if (dist_right < closest_dist[k - 1] * eps_fac)
{
/* Search right subtree if minimum distance is below limit*/
search_splitnode_double((Node_double *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
}
else
{
/* Right of cutting plane */
dist_right = min_dist;
if (dist_right < closest_dist[k - 1] * eps_fac)
{
/* Search right subtree if minimum distance is below limit*/
search_splitnode_double((Node_double *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
/* Left of cutting plane. Update minimum distance.
See Algorithms for Fast Vector Quantization
Sunil Arya and David M. Mount. */
box_diff = point_coord[dim] - root->cut_bounds_hv;
if (box_diff < 0)
{
box_diff = 0;
}
dist_left = min_dist - box_diff * box_diff + new_offset * new_offset;
if (dist_left < closest_dist[k - 1] * eps_fac)
{
/* Search left subtree if minimum distance is below limit*/
search_splitnode_double((Node_double *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist);
}
}
}
/************************************************
Search for nearest neighbour for a set of query points
Params:
tree : Tree struct of kd tree
pa : data points
pidx : permutation index of data points
point_coords : query points
num_points : number of query points
mask : boolean array of invalid (True) and valid (False) data points
closest_idx : index of closest data point found (return)
closest_dist : distance to closest point (return)
************************************************/
void search_tree_double(Tree_double *tree, double *pa, double *point_coords,
uint32_t num_points, uint32_t k, double distance_upper_bound,
double eps, uint8_t *mask, uint32_t *closest_idxs, double *closest_dists)
{
double min_dist;
double eps_fac = 1 / ((1 + eps) * (1 + eps));
int8_t no_dims = tree->no_dims;
double *bbox = tree->bbox;
uint32_t *pidx = tree->pidx;
uint32_t j = 0;
#if defined(_MSC_VER) && defined(_OPENMP)
int32_t i = 0;
int32_t local_num_points = (int32_t) num_points;
#else
uint32_t i;
uint32_t local_num_points = num_points;
#endif
Node_double *root = (Node_double *)tree->root;
/* Queries are OpenMP enabled */
#pragma omp parallel
{
/* The low chunk size is important to avoid L2 cache thrashing
for spatially coherent query datasets
*/
#pragma omp for private(i, j) schedule(static, 100) nowait
for (i = 0; i < local_num_points; i++)
{
for (j = 0; j < k; j++)
{
closest_idxs[i * k + j] = UINT32_MAX;
closest_dists[i * k + j] = DBL_MAX;
}
min_dist = get_min_dist_double(point_coords + no_dims * i, no_dims, bbox);
search_splitnode_double(root, pa, pidx, no_dims, point_coords + no_dims * i, min_dist,
k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]);
}
}
}
|
critical.c | #include <omp.h>
#include <stdio.h>
int main(void)
{
int x;
x = 0;
#pragma omp parallel shared(x)
{
#pragma omp critical
x = x + 1;
} /* end of parallel section */
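/* each increment is serialized by the critical construct, so x ends up
   equal to the number of threads in the team */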
printf("x = %d \n", x);
}
|
gemm_blis.c | /**
* This file is part of convGemm
*
* Copyright (C) 2021-22 Universitat Politècnica de València and
* Universitat Jaume I
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gemm_blis.h"
cntx_t *blis_cntx = NULL;
sgemm_ukr_ft blis_gemm_kernel = NULL;
int blis_abi_version = BLIS_ABI_VERSION;
/*
* Initializes BLIS, blis_cntx and blis_gemm_kernel
*/
void gemm_blis_init() {
if (blis_cntx == NULL) {
bli_init();
blis_cntx = bli_gks_query_cntx();
blis_gemm_kernel = bli_cntx_get_l3_nat_ukr_dt(BLIS_FLOAT, (l3ukr_t) BLIS_GEMM, blis_cntx);
}
}
/*
 * BLIS pack for M-->Mc: packs an mc x nc block of M into Mc as row
 * micro-panels of height RR, zero-padding the last partial panel
*/
void pack_RB(char orderM, char transM, int mc, int nc, const float *restrict M, int ldM, float *restrict Mc, int RR,
const conv_p *conv_params, int start_row, int start_col) {
int i, j, ii, k, rr;
if ((transM == 'N') && (orderM == 'C'))
M = &Mcol(start_row, start_col);
else if ((transM == 'N') && (orderM == 'R'))
M = &Mrow(start_row, start_col);
else if ((transM == 'T') && (orderM == 'C'))
M = &Mcol(start_col, start_row);
else
M = &Mrow(start_col, start_row);
if (((transM == 'N') && (orderM == 'C')) ||
((transM == 'T') && (orderM == 'R')))
#pragma omp parallel for private(i, j, ii, rr, k)
for (i = 0; i < mc; i += RR) {
k = i * nc;
rr = min(mc - i, RR);
for (j = 0; j < nc; j++) {
for (ii = 0; ii < rr; ii++) {
Mc[k] = Mcol(i + ii, j);
k++;
}
for (ii = rr; ii < RR; ii++) {
Mc[k] = (float) 0.0;
k++;
}
// k += (RR-rr);
}
}
else
#pragma omp parallel for private(i, j, ii, rr, k)
for (i = 0; i < mc; i += RR) {
k = i * nc;
rr = min(mc - i, RR);
for (j = 0; j < nc; j++) {
for (ii = 0; ii < rr; ii++) {
Mc[k] = Mcol(j, i + ii);
k++;
}
for (ii = rr; ii < RR; ii++) {
Mc[k] = (float) 0.0;
k++;
}
// k += (RR-rr);
}
}
}
/*
* BLIS pack for M-->Mc
*/
void pack_CB(char orderM, char transM, int mc, int nc, const float *restrict M, int ldM, float *restrict Mc, int RR,
const conv_p *conv_params, int start_row, int start_col) {
int i, j, jj, k, nr;
if ((transM == 'N') && (orderM == 'C'))
M = &Mcol(start_row, start_col);
else if ((transM == 'N') && (orderM == 'R'))
M = &Mrow(start_row, start_col);
else if ((transM == 'T') && (orderM == 'C'))
M = &Mcol(start_col, start_row);
else
M = &Mrow(start_col, start_row);
k = 0;
if (((transM == 'N') && (orderM == 'C')) ||
((transM == 'T') && (orderM == 'R')))
#pragma omp parallel for private(i, j, jj, nr, k)
for (j = 0; j < nc; j += RR) {
k = j * mc;
nr = min(nc - j, RR);
for (i = 0; i < mc; i++) {
for (jj = 0; jj < nr; jj++) {
Mc[k] = Mcol(i, j + jj);
k++;
}
for (jj = nr; jj < RR; jj++) {
Mc[k] = (float) 0.0;
k++;
}
// k += (RR-nr);
}
}
else
#pragma omp parallel for private(i, j, jj, nr, k)
for (j = 0; j < nc; j += RR) {
k = j * mc;
nr = min(nc - j, RR);
for (i = 0; i < mc; i++) {
for (jj = 0; jj < nr; jj++) {
Mc[k] = Mcol(j + jj, i);
k++;
}
for (jj = nr; jj < RR; jj++) {
Mc[k] = (float) 0.0;
k++;
}
// k += (RR-nr);
}
}
}
/*
* sxpbyM implementation
*/
void sxpbyM(int m, int n, const float *restrict X, int ldx, float beta, float *restrict Y, int ldy) {
if (beta == 0.0) {
for (int j = 0; j < n; j++)
for (int i = 0; i < m; i++)
Y[j * ldy + i] = X[j * ldx + i];
} else if (beta == 1.0) {
for (int j = 0; j < n; j++)
for (int i = 0; i < m; i++)
Y[j * ldy + i] += X[j * ldx + i];
} else {
for (int j = 0; j < n; j++)
for (int i = 0; i < m; i++)
Y[j * ldy + i] = beta * Y[j * ldy + i] + X[j * ldx + i];
}
}
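/*
 * Illustrative sketch (not part of convGemm): apply sxpbyM to a 2x2
 * column-major block, computing Y := beta*Y + X with beta = 2. The name
 * sxpbyM_example is an assumption for illustration.
 */
static void sxpbyM_example(void) {
    float X[4] = {1.0f, 2.0f, 3.0f, 4.0f};   /* 2x2 block, ldx = 2 */
    float Y[4] = {1.0f, 1.0f, 1.0f, 1.0f};   /* 2x2 block, ldy = 2 */
    sxpbyM(2, 2, X, 2, 2.0f, Y, 2);          /* Y becomes {3, 4, 5, 6} */
}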
|
copyin-1.c | /* { dg-do run } */
/* { dg-require-effective-target tls_runtime } */
#include <omp.h>
#include <stdlib.h>
int thr = 32;
#pragma omp threadprivate (thr)
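/* Each thread gets its own copy of 'thr'; copyin(thr) below initializes every
   thread's copy from the primary thread's value (32) on entry to the first
   parallel region, which is what the reduction over 'l' verifies. */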
int
main (void)
{
int l = 0;
omp_set_dynamic (0);
omp_set_num_threads (6);
#pragma omp parallel copyin (thr) reduction (||:l)
{
l = thr != 32;
thr = omp_get_thread_num () + 11;
}
if (l || thr != 11)
abort ();
#pragma omp parallel reduction (||:l)
l = thr != omp_get_thread_num () + 11;
if (l)
abort ();
return 0;
}
|
GB_unop__isnan_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isnan_bool_fp64)
// op(A') function: GB (_unop_tran__isnan_bool_fp64)
// C type: bool
// A type: double
// cast: double cij = (aij)
// unaryop: cij = isnan (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isnan (x) ;
// casting
#define GB_CAST(z, aij) \
double z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (aij) ; \
Cx [pC] = isnan (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isnan_bool_fp64)
(
bool *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = (aij) ;
Cx [p] = isnan (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = (aij) ;
Cx [p] = isnan (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__isnan_bool_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SumaVectoresC-parallel.c | /* To compile use (-lrt: real time library):
gcc -O2 SumaVectores.c -o SumaVectores -lrt
To run use: SumaVectoresC length
*/
#include <stdlib.h> // library providing atoi(), malloc() and free()
#include <stdio.h> // library providing printf()
#include <time.h> // library providing clock_gettime()
#include <omp.h>
//#define PRINTF_ALL// comment out to remove the printf ...
// that prints every component
//Only one of the three VECTOR_ constants may be defined (only one of the ...
//three following defines may be uncommented):
//#define VECTOR_LOCAL // uncomment so that the vectors are local variables ...
// (if the stack size is exceeded a ...
// "Segmentation fault" error is produced)
#define VECTOR_GLOBAL// uncomment so that the vectors are global variables ...
// (their length is not limited by the ...
// program's stack size)
//#define VECTOR_DYNAMIC // uncomment so that the vectors are dynamic variables ...
// (memory reusable during execution)
#ifdef VECTOR_GLOBAL
#define MAX 33554432 //=2^25
double v1[MAX], v2[MAX], v3[MAX];
#endif
int main(int argc, char** argv){
int i;
// struct timespec cgt1,cgt2; double ncgt; //for execution time
// //Read the input argument (number of vector components)
double start, end, elapsed;
if (argc<2){
printf("Faltan nº componentes del vector\n");
exit(-1);
}
unsigned int N = atoi(argv[1]); // Maximum N =2^32-1=4294967295 (sizeof(unsigned int) = 4 B)
#ifdef VECTOR_LOCAL
double v1[N], v2[N], v3[N]; // Variable-length local arrays sized at run time ...
// available in C since the C99 revision
#endif
#ifdef VECTOR_GLOBAL
if (N>MAX) N=MAX;
#endif
#ifdef VECTOR_DYNAMIC
double *v1, *v2, *v3;
v1 = (double*) malloc(N*sizeof(double));// malloc needs the size in bytes
v2 = (double*) malloc(N*sizeof(double)); //if there is not enough space malloc returns NULL
v3 = (double*) malloc(N*sizeof(double));
if ( (v1==NULL) || (v2==NULL) || (v3==NULL) ){
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
#endif
//Initialize the vectors
#pragma omp parallel for
for(i=0; i<N; i++){
v1[i] = N*0.1+i*0.1;
v2[i] = N*0.1-i*0.1; //the values depend on N
}
//Compute the vector sum
start= omp_get_wtime( );
#pragma omp parallel for
for(i=0; i<N; i++)
v3[i] = v1[i] + v2[i];
end = omp_get_wtime( );
elapsed = end - start;
//Print the result of the sum and the execution time
#ifdef PRINTF_ALL
printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n",elapsed,N);
for(i=0; i<N; i++)
printf("/ V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
i,i,i,v1[i],v2[i],v3[i]);
#else
printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ V1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f)//V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",elapsed,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]);
#endif
printf("Vector resultante v3[8],v3[11]:\n \t V3[8]=%d, V3[11]=%d\n",v3[8],v3[11]);
#ifdef VECTOR_DYNAMIC
free(v1); // frees the space reserved for v1
free(v2); // frees the space reserved for v2
free(v3); // frees the space reserved for v3
#endif
return 0;
}
|
utils.h | #ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <assert.h>
#include "pixman-private.h" /* For 'inline' definition */
#include "utils-prng.h"
#if defined(_MSC_VER)
#define snprintf _snprintf
#define strcasecmp _stricmp
#endif
#define ARRAY_LENGTH(A) ((int) (sizeof (A) / sizeof ((A) [0])))
/* A primitive pseudorandom number generator,
* taken from POSIX.1-2001 example
*/
extern prng_t prng_state_data;
extern prng_t *prng_state;
#ifdef USE_OPENMP
#pragma omp threadprivate(prng_state_data)
#pragma omp threadprivate(prng_state)
#endif
static inline uint32_t
prng_rand (void)
{
return prng_rand_r (prng_state);
}
static inline void
prng_srand (uint32_t seed)
{
if (!prng_state)
{
/* Without setting a seed, PRNG does not work properly (is just
* returning zeros). So we only initialize the pointer here to
* make sure that 'prng_srand' is always called before any
* other 'prng_*' function. The wrongdoers violating this order
* will get a segfault. */
prng_state = &prng_state_data;
}
prng_srand_r (prng_state, seed);
}
static inline uint32_t
prng_rand_n (int max)
{
return prng_rand () % max;
}
static inline void
prng_randmemset (void *buffer, size_t size, prng_randmemset_flags_t flags)
{
prng_randmemset_r (prng_state, buffer, size, flags);
}
/* CRC 32 computation
*/
uint32_t
compute_crc32 (uint32_t in_crc32,
const void *buf,
size_t buf_len);
uint32_t
compute_crc32_for_image (uint32_t in_crc32,
pixman_image_t *image);
/* Print the image in hexadecimal */
void
print_image (pixman_image_t *image);
/* Returns TRUE if running on a little endian system
*/
static force_inline pixman_bool_t
is_little_endian (void)
{
unsigned long endian_check_var = 1;
return *(unsigned char *)&endian_check_var == 1;
}
/* perform endian conversion of pixel data
*/
void
image_endian_swap (pixman_image_t *img);
#if defined (HAVE_MPROTECT) && defined (HAVE_GETPAGESIZE) && \
defined (HAVE_SYS_MMAN_H) && defined (HAVE_MMAP)
/* fence_malloc and friends have working fence implementation.
* Without this, fence_malloc still allocs but does not catch
* out-of-bounds accesses.
*/
#define FENCE_MALLOC_ACTIVE 1
#else
#define FENCE_MALLOC_ACTIVE 0
#endif
/* Allocate memory that is bounded by protected pages,
* so that out-of-bounds access will cause segfaults
*/
void *
fence_malloc (int64_t len);
void
fence_free (void *data);
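/* Illustrative usage sketch (not part of the original header; the buffer size
 * and access pattern are made up). With FENCE_MALLOC_ACTIVE, touching memory
 * past the requested length faults immediately instead of silently corrupting
 * neighbouring data:
 *
 *     uint8_t *buf = fence_malloc (64);
 *     buf[0] = 0xff;        // in bounds: fine
 *     // buf[64] = 0xff;    // out of bounds: traps when fences are active
 *     fence_free (buf);
 */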
pixman_image_t *
fence_image_create_bits (pixman_format_code_t format,
int min_width,
int height,
pixman_bool_t stride_fence);
/* Return the page size if FENCE_MALLOC_ACTIVE, or zero otherwise */
unsigned long
fence_get_page_size ();
/* Generate n_bytes random bytes in fence_malloced memory */
uint8_t *
make_random_bytes (int n_bytes);
/* Return current time in seconds */
double
gettime (void);
uint32_t
get_random_seed (void);
/* main body of the fuzzer test */
int
fuzzer_test_main (const char *test_name,
int default_number_of_iterations,
uint32_t expected_checksum,
uint32_t (*test_function)(int testnum, int verbose),
int argc,
const char *argv[]);
void
fail_after (int seconds, const char *msg);
/* If possible, enable traps for floating point exceptions */
void enable_divbyzero_exceptions(void);
void enable_invalid_exceptions(void);
/* Converts a8r8g8b8 pixels to pixels that
* - are not premultiplied,
* - are stored in this order in memory: R, G, B, A, regardless of
* the endianness of the computer.
* It is allowed for @src and @dst to point to the same memory buffer.
*/
void
a8r8g8b8_to_rgba_np (uint32_t *dst, uint32_t *src, int n_pixels);
pixman_bool_t
write_png (pixman_image_t *image, const char *filename);
void
draw_checkerboard (pixman_image_t *image,
int check_size,
uint32_t color1, uint32_t color2);
/* A pair of macros which can help to detect corruption of
* floating point registers after a function call. This may
* happen if _mm_empty() call is forgotten in MMX/SSE2 fast
* path code, or ARM NEON assembly optimized function forgets
* to save/restore d8-d15 registers before use.
*/
#define FLOAT_REGS_CORRUPTION_DETECTOR_START() \
static volatile double frcd_volatile_constant1 = 123451; \
static volatile double frcd_volatile_constant2 = 123452; \
static volatile double frcd_volatile_constant3 = 123453; \
static volatile double frcd_volatile_constant4 = 123454; \
static volatile double frcd_volatile_constant5 = 123455; \
static volatile double frcd_volatile_constant6 = 123456; \
static volatile double frcd_volatile_constant7 = 123457; \
static volatile double frcd_volatile_constant8 = 123458; \
double frcd_canary_variable1 = frcd_volatile_constant1; \
double frcd_canary_variable2 = frcd_volatile_constant2; \
double frcd_canary_variable3 = frcd_volatile_constant3; \
double frcd_canary_variable4 = frcd_volatile_constant4; \
double frcd_canary_variable5 = frcd_volatile_constant5; \
double frcd_canary_variable6 = frcd_volatile_constant6; \
double frcd_canary_variable7 = frcd_volatile_constant7; \
double frcd_canary_variable8 = frcd_volatile_constant8;
#define FLOAT_REGS_CORRUPTION_DETECTOR_FINISH() \
assert (frcd_canary_variable1 == frcd_volatile_constant1); \
assert (frcd_canary_variable2 == frcd_volatile_constant2); \
assert (frcd_canary_variable3 == frcd_volatile_constant3); \
assert (frcd_canary_variable4 == frcd_volatile_constant4); \
assert (frcd_canary_variable5 == frcd_volatile_constant5); \
assert (frcd_canary_variable6 == frcd_volatile_constant6); \
assert (frcd_canary_variable7 == frcd_volatile_constant7); \
assert (frcd_canary_variable8 == frcd_volatile_constant8);
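/* Illustrative usage sketch (added; 'suspect_fast_path' is a hypothetical
 * function name): place the two macros around a call that might clobber
 * floating point registers; the asserts in FINISH fire if any canary changed.
 *
 *     FLOAT_REGS_CORRUPTION_DETECTOR_START ();
 *     suspect_fast_path ();
 *     FLOAT_REGS_CORRUPTION_DETECTOR_FINISH ();
 */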
/* Try to get an aligned memory chunk */
void *
aligned_malloc (size_t align, size_t size);
double
convert_srgb_to_linear (double component);
double
convert_linear_to_srgb (double component);
void
initialize_palette (pixman_indexed_t *palette, uint32_t depth, int is_rgb);
pixman_format_code_t
format_from_string (const char *s);
void
list_formats (void);
void
list_operators (void);
pixman_op_t
operator_from_string (const char *s);
const char *
operator_name (pixman_op_t op);
const char *
format_name (pixman_format_code_t format);
typedef struct
{
double r, g, b, a;
} color_t;
void
do_composite (pixman_op_t op,
const color_t *src,
const color_t *mask,
const color_t *dst,
color_t *result,
pixman_bool_t component_alpha);
void
round_color (pixman_format_code_t format, color_t *color);
typedef struct
{
pixman_format_code_t format;
uint32_t am, rm, gm, bm;
uint32_t as, rs, gs, bs;
uint32_t aw, rw, gw, bw;
} pixel_checker_t;
void
pixel_checker_init (pixel_checker_t *checker, pixman_format_code_t format);
void
pixel_checker_split_pixel (const pixel_checker_t *checker, uint32_t pixel,
int *a, int *r, int *g, int *b);
void
pixel_checker_get_max (const pixel_checker_t *checker, color_t *color,
int *a, int *r, int *g, int *b);
void
pixel_checker_get_min (const pixel_checker_t *checker, color_t *color,
int *a, int *r, int *g, int *b);
pixman_bool_t
pixel_checker_check (const pixel_checker_t *checker,
uint32_t pixel, color_t *color);
void
pixel_checker_convert_pixel_to_color (const pixel_checker_t *checker,
uint32_t pixel, color_t *color);
void
pixel_checker_get_masks (const pixel_checker_t *checker,
uint32_t *am,
uint32_t *rm,
uint32_t *gm,
uint32_t *bm);
|
7316.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
{
/* E := A*B */
/* j and k must be private, otherwise the threads of this region race on them */
#pragma omp parallel for simd schedule(static, 2) num_threads(2) private(j, k)
for (i = 0; i < _PB_NI; i++)
{
#pragma omp parallel for simd schedule(static, 2) num_threads(2)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
#pragma omp parallel for simd schedule(static, 2) num_threads(2) private(j, k)
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp parallel for simd schedule(static, 2) num_threads(2)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
#pragma omp parallel for simd schedule(static, 2) num_threads(2) private(j, k)
for (i = 0; i < _PB_NI; i++)
{
#pragma omp parallel for simd schedule(static, 2) num_threads(2)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
GB_unop__identity_int16_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_fc64)
// op(A') function: GB (_unop_tran__identity_int16_fc64)
// C type: int16_t
// A type: GxB_FC64_t
// cast: int16_t cij = GB_cast_to_int16_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = GB_cast_to_int16_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = GB_cast_to_int16_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int16_fc64)
(
int16_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
int16_t z = GB_cast_to_int16_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Helm3dkaczswp_mex.c | #include <stdlib.h> /* for getenv */
#include <stddef.h> /* for size_t type */
#include <string.h> /* for memcpy */
#include <omp.h>
#include "math.h"
#include "mex.h"
#include "matrix.h"
#include <assert.h>
#include "Helm3d_27pt.h"
#ifdef DERIV
#undef DERIV
#endif
/*
*
* 27pt stencil Helmholtz matrix-vector product.
*
*
* Usage:
* y = Helm3dkaczswp_mex(wn,h,n,nmpl,omega,x,b);
*
* Input:
* wn - wavenumber evaluated on pml grid
* h - [hx,hy,hz] spacings in each direction
* n - [nx,ny,nz] points in each dimension, including pml
* npml - 2 x 3 matrix denoting the number of pml points in each dimension
* omega - relaxation parameter, in (0,2)
* x - nx*ny*nz x 1 vector
* b - right hand side, nx*ny*nz x 1 vector
*
* Output:
* y - Kaczmarz sweep applied to x
*
* Curt Da Silva, 2015
*
* To compile, run:
* mex -O -largeArrayDims Helm3dkaczswp_mex.c -DDEFINEUNIX -lmwblas CFLAGS="\$CFLAGS -fopenmp -std=c99" LDFLAGS="\$LDFLAGS -fopenmp"
*
*/
#define WN prhs[0]
#define H prhs[1]
#define N prhs[2]
#define NPML prhs[3]
#define OMEGA prhs[4]
#define X prhs[5]
#define B prhs[6]
#define OUTPUT plhs[0]
void init_zero(double *x, int n){
int i;
for(i=0;i<n;i++){
x[i] = 0;
}
}
void do_sweep( double * wnr, double * wni, double * h, double * n, double * npml,
double *yr, double *yi, double *xr, double *xi, double * br, double * bi,
const double omega, int P) {
int i,j,k,kout,t,s;
int nx = (int)n[0]; int ny = (int)n[1]; int nz = (int)n[2];
int npmlx_lo = (int)npml[0]; int npmlx_hi = (int)npml[1];
int npmly_lo = (int)npml[2]; int npmly_hi = (int)npml[3];
int npmlz_lo = (int)npml[4]; int npmlz_hi = (int)npml[5];
coef_consts c = compute_coef_consts(h);
int pmlz_alloc = 0; int pmly_alloc = 0; int pmlx_alloc = 0;
double complex coef[27];
double complex x[27];
double complex d;
double row_norm_sq;
wn_type wn_window[27];
pml_info p;
pml_adj_info padj;
double complex atx;
p.x_hasL = 0; p.x_hasR = 1;
p.y_hasL = 0; p.y_hasR = 1;
p.z_hasL = 0; p.z_hasR = 1;
#pragma omp parallel for schedule(static) private(i,j,k,kout,t,wn_window,coef,atx,row_norm_sq,d,x) firstprivate(p,padj,pmlz_alloc,pmly_alloc,pmlx_alloc) num_threads(P)
for(s=0; s<P; s++)
{
xyzloop_updown(
// Cache a window of the wavenumber around the current point
load_wn_nbrhood(wn_window,wnr,wni,i,j,k,nx,ny,nz,p);
// Get coefficients
get_coefs(coef,wn_window, c, p, padj);
// Cache a window of the wavefield around the current point
load_nbrhoodc(x,yr,yi,i,j,k,nx,ny,nz,s,p);
kout = IDX1D4(i,j,k,s,nx,ny,nz);
atx = 0.0 + 0.0*I;
row_norm_sq = 0;
for(t=0; t<27; t++){
atx += coef[t] * x[t];
row_norm_sq += creal(conj(coef[t])*coef[t]);
}
d = CMPLX(br[kout],bi[kout]);
d = omega*(d - atx)/row_norm_sq;
for(t=0; t<27; t++){
coef[t] = d*conj(coef[t]);
}
nbrhood_update(coef,yr,yi,i,j,k,nx,ny,nz,s,p);
)
}
}
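/*
 * Illustrative sketch (added, not called anywhere): the update applied above
 * at each grid point is one complex Kaczmarz row projection,
 *   x <- x + omega * (b_k - <a_k, x>) / ||a_k||^2 * conj(a_k),
 * written here for a dense row of length n with hypothetical names.
 */
#if 0
static void kaczmarz_row_update(int n, const double complex *a,
                                double complex b, double omega,
                                double complex *x)
{
    double complex ax = 0.0 + 0.0*I; /* <a_k, x> */
    double nrm2 = 0.0;               /* ||a_k||^2 */
    int t;
    for (t = 0; t < n; t++) {
        ax += a[t] * x[t];
        nrm2 += creal(conj(a[t]) * a[t]);
    }
    double complex d = omega * (b - ax) / nrm2;
    for (t = 0; t < n; t++) {
        x[t] += d * conj(a[t]);
    }
}
#endif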
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
mwSize n_threads = 1;
double * wnr, *wni, *h, *n, *npml, *yr, *yi, *br, *bi, *xr, *xi;
double omega;
char *n_threads_str = NULL;
int bi_alloc = 0;
int xi_alloc = 0;
int P;
wnr = mxGetPr(WN);
if(mxIsComplex(WN))
{
wni = mxGetPi(WN);
}
else
{
wni = NULL;
}
if(mxGetM(N) != 3 && mxGetN(N) != 3)
{
mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:Helm3dkaczswp_mex:Nsize",
"n must be a 3-length vector");
}
h = mxGetPr(H);
n = mxGetPr(N);
omega = mxGetScalar(OMEGA);
npml = mxGetPr(NPML);
int numel = (int)n[0]*n[1]*n[2];
if(mxGetM(X) != numel) {
mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:Helm3dkaczswp_mex:Xsize",
"x must have nx*ny*nz elements");
}
P = mxGetN(X);
if(mxGetM(B) != numel) {
mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:Helm3dkaczswp_mex:Bsize",
"b must have nx*ny*nz elements");
}
xr = mxGetPr(X);
if(mxIsComplex(X)){
xi = mxGetPi(X);
}
else{
xi = mxCalloc(numel*P,sizeof(double));
xi_alloc = 1;
}
br = mxGetPr(B);
if(mxIsComplex(B)){ bi = mxGetPi(B); }
else
{
bi = mxCalloc(numel*P,sizeof(double));
bi_alloc = 1;
}
/* define output vector y and initialize with input vector x.*/
OUTPUT = mxCreateDoubleMatrix(numel, P, mxCOMPLEX);
yr = mxGetPr(OUTPUT);
yi = mxGetPi(OUTPUT);
memcpy( yr, xr, sizeof(double)*numel*P );
memcpy( yi, xi, sizeof(double)*numel*P );
do_sweep( wnr, wni, h, n, npml, yr, yi, xr, xi, br, bi,omega,P );
if (bi_alloc) { mxFree(bi); }
if (xi_alloc) { mxFree(xi); }
return;
}
|
conv3x3s1_winograd64_neon5_AoA.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd64_neon5_AoA(const Mat& bottom_blob, Mat& top_blob, const Mat& _bias, const Option& opt,
int inch, int outw, int outh, int outch)
{
const float* bias = _bias;
Mat top_blob_tm = bottom_blob;
Mat top_blob_bordered = top_blob;
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
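// (Note added for clarity: the six rows above are the output transform of the
// Winograd F(6x6, 3x3) algorithm -- each 8x8 transformed tile collapses back
// to a 6x6 block of spatial output, which is why the loops below step through
// the output in blocks of 6.)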
#if __ARM_NEON
const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
#if __ARM_NEON
#if __aarch64__
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _output0_tm_00;
float32x4_t _output0_tm_11;
float32x4_t _output0_tm_22;
float32x4_t _output0_tm_33;
float32x4_t _output0_tm_44;
float32x4_t _output0_tm_55;
float32x4_t _output0_tm_66;
float32x4_t _output0_tm_77;
_output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0);
_output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1);
_output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2);
_output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3);
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0 += out0_tm.w*tiles*25;
output0_tm1 += out0_tm.w*tiles*25;
output0_tm2 += out0_tm.w*tiles*25;
output0_tm3 += out0_tm.w*tiles*25;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m=0; m+1<6; m+=2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8*2;
t1 += 8*2;
output0 += outw*2;
output1 += outw*2;
}
#else // __aarch64__
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*24);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles*32);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*40);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*48);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*56);
float* t0 = tmp[0];
float* t1 = tmp[1];
// int step = out0_tm.w * tiles * 2*4 *4;
int step = out0_tm.w * tiles *4;
asm volatile(
// loop0
// "vld1.f32 {d16-d17}, [%2], %21 \n"
// "vld1.f32 {d18-d19}, [%3], %21 \n"
// "vld1.f32 {d20-d21}, [%4], %21 \n"
// "vld1.f32 {d22-d23}, [%5], %21 \n"
// "vld1.f32 {d24-d25}, [%6], %21 \n"
// "vld1.f32 {d26-d27}, [%7], %21 \n"
// "vld1.f32 {d28-d29}, [%8], %21 \n"
// "vld1.f32 {d30-d31}, [%9], %21 \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%2], %21 \n"
"vld1.f32 {d16[1]}, [%3], %21 \n"
"vld1.f32 {d17[0]}, [%4], %21 \n"
"vld1.f32 {d17[1]}, [%5], %21 \n"
"vld1.f32 {d20[0]}, [%2], %21 \n"
"vld1.f32 {d20[1]}, [%3], %21 \n"
"vld1.f32 {d21[0]}, [%4], %21 \n"
"vld1.f32 {d21[1]}, [%5], %21 \n"
"vld1.f32 {d24[0]}, [%2], %21 \n"
"vld1.f32 {d24[1]}, [%3], %21 \n"
"vld1.f32 {d25[0]}, [%4], %21 \n"
"vld1.f32 {d25[1]}, [%5], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%2], %21 \n"
"vld1.f32 {d28[1]}, [%3], %21 \n"
"vld1.f32 {d29[0]}, [%4], %21 \n"
"vld1.f32 {d29[1]}, [%5], %21 \n"
"vld1.f32 {d18[0]}, [%2], %21 \n"
"vld1.f32 {d18[1]}, [%3], %21 \n"
"vld1.f32 {d19[0]}, [%4], %21 \n"
"vld1.f32 {d19[1]}, [%5], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%2], %21 \n"
"vld1.f32 {d22[1]}, [%3], %21 \n"
"vld1.f32 {d23[0]}, [%4], %21 \n"
"vld1.f32 {d23[1]}, [%5], %21 \n"
"vld1.f32 {d26[0]}, [%2], %21 \n"
"vld1.f32 {d26[1]}, [%3], %21 \n"
"vld1.f32 {d27[0]}, [%4], %21 \n"
"vld1.f32 {d27[1]}, [%5], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%2] \n"
"vld1.f32 {d30[1]}, [%3] \n"
"vld1.f32 {d31[0]}, [%4] \n"
"vld1.f32 {d31[1]}, [%5] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
// "vld1.f32 {d16-d17}, [%2] \n"
// "vld1.f32 {d18-d19}, [%3] \n"
// "vld1.f32 {d20-d21}, [%4] \n"
// "vld1.f32 {d22-d23}, [%5] \n"
// "vld1.f32 {d24-d25}, [%6] \n"
// "vld1.f32 {d26-d27}, [%7] \n"
// "vld1.f32 {d28-d29}, [%8] \n"
// "vld1.f32 {d30-d31}, [%9] \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%6], %21 \n"
"vld1.f32 {d16[1]}, [%7], %21 \n"
"vld1.f32 {d17[0]}, [%8], %21 \n"
"vld1.f32 {d17[1]}, [%9], %21 \n"
"vld1.f32 {d20[0]}, [%6], %21 \n"
"vld1.f32 {d20[1]}, [%7], %21 \n"
"vld1.f32 {d21[0]}, [%8], %21 \n"
"vld1.f32 {d21[1]}, [%9], %21 \n"
"vld1.f32 {d24[0]}, [%6], %21 \n"
"vld1.f32 {d24[1]}, [%7], %21 \n"
"vld1.f32 {d25[0]}, [%8], %21 \n"
"vld1.f32 {d25[1]}, [%9], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%6], %21 \n"
"vld1.f32 {d28[1]}, [%7], %21 \n"
"vld1.f32 {d29[0]}, [%8], %21 \n"
"vld1.f32 {d29[1]}, [%9], %21 \n"
"vld1.f32 {d18[0]}, [%6], %21 \n"
"vld1.f32 {d18[1]}, [%7], %21 \n"
"vld1.f32 {d19[0]}, [%8], %21 \n"
"vld1.f32 {d19[1]}, [%9], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%6], %21 \n"
"vld1.f32 {d22[1]}, [%7], %21 \n"
"vld1.f32 {d23[0]}, [%8], %21 \n"
"vld1.f32 {d23[1]}, [%9], %21 \n"
"vld1.f32 {d26[0]}, [%6], %21 \n"
"vld1.f32 {d26[1]}, [%7], %21 \n"
"vld1.f32 {d27[0]}, [%8], %21 \n"
"vld1.f32 {d27[1]}, [%9], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%6] \n"
"vld1.f32 {d30[1]}, [%7] \n"
"vld1.f32 {d31[0]}, [%8] \n"
"vld1.f32 {d31[1]}, [%9] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm1_0), // %3
"=r"(output0_tm2_0), // %4
"=r"(output0_tm3_0), // %5
"=r"(output0_tm0_4), // %6
"=r"(output0_tm1_4), // %7
"=r"(output0_tm2_4), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm1_0),
"4"(output0_tm2_0),
"5"(output0_tm3_0),
"6"(output0_tm0_4),
"7"(output0_tm1_4),
"8"(output0_tm2_4),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw*2 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm_1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm_2 = out0_tm.row(i * w_tm/8 + j + tiles*2);
const float* output0_tm_3 = out0_tm.row(i * w_tm/8 + j + tiles*3);
const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles*4);
const float* output0_tm_5 = out0_tm.row(i * w_tm/8 + j + tiles*5);
const float* output0_tm_6 = out0_tm.row(i * w_tm/8 + j + tiles*6);
const float* output0_tm_7 = out0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 8;
output0_tm_1 += out0_tm.w * tiles * 8;
output0_tm_2 += out0_tm.w * tiles * 8;
output0_tm_3 += out0_tm.w * tiles * 8;
output0_tm_4 += out0_tm.w * tiles * 8;
output0_tm_5 += out0_tm.w * tiles * 8;
output0_tm_6 += out0_tm.w * tiles * 8;
output0_tm_7 += out0_tm.w * tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
}
}
|
GB_unaryop__lnot_uint16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_uint8
// op(A') function: GB_tran__lnot_uint16_uint8
// C type: uint16_t
// A type: uint8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint16_uint8
(
uint16_t *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pooling2x2s2_max_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2*outw + w;
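// After producing one output row the input pointers have advanced by 2*outw
// floats; tailstep skips the rest of that input row plus one more full row,
// implementing the vertical stride of 2.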
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"ld1 {v2.4s, v3.4s}, [%2], #32 \n"
"fmax v0.4s, v0.4s, v2.4s \n"
"fmax v1.4s, v1.4s, v3.4s \n"
"fmaxp v2.4s, v0.4s, v1.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v2.4s}, [%3], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"
"vld1.f32 {d4-d7}, [%2]! \n"
"vmax.f32 q0, q0, q2 \n"
"vmax.f32 q1, q1, q3 \n"
"vpmax.f32 d4, d0, d1 \n"
"vpmax.f32 d5, d2, d3 \n"
"subs %0, #1 \n"
"vst1.f32 {d4-d5}, [%3]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float max0 = std::max(r0[0], r0[1]);
float max1 = std::max(r1[0], r1[1]);
*outptr = std::max(max0, max1);
r0 += 2;
r1 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
}
}
}
}
|
perfect_number_stubs.c | /* vim: set ft=c sw=2 ts=2: */
/* perfect_number_stubs.c
* code stubs for perfect number calculation in C
* to be called by OCaml */
#include <stdio.h>
#include <caml/alloc.h>
#include <caml/memory.h>
#include <caml/mlvalues.h>
/** c implementation of perfect number predicate */
int is_perfect(int n) {
int sum = 0;
for(int i = 1; i < n; i++) {
if(n % i == 0) sum += i;
}
return (sum == n);
}
/** predicate to check whether a number is perfect */
CAMLprim value is_perfect_c(value ml_n) {
CAMLparam1(ml_n);
CAMLlocal1(res);
int n = Int_val(ml_n);
res = Val_bool(is_perfect(n));
CAMLreturn(res);
}
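/* Illustrative note (an assumption about the OCaml side, which is not shown
 * here): a stub like the one above is typically bound with an external
 * declaration such as
 *
 *   external is_perfect : int -> bool = "is_perfect_c"
 *
 * after which `is_perfect 28` evaluates to true. */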
/** Create a list of perfect numbers upto given limit n */
/*
CAMLprim value perfect_numbers_c(value ml_n) {
CAMLparam1(ml_n);
CAMLlocal2(res_list, cons);
res_list = Val_emptylist;
int n = Int_val(ml_n);
#pragma omp for
for(int i = (n-1); i > 1; i--) {
if (is_perfect(i)) {
cons = caml_alloc(2, 0);
Store_field(cons, 0, Val_int(i));
Store_field(cons, 1, res_list);
res_list = cons;
}
}
CAMLreturn(res_list);
}
*/
|
GB_unaryop__identity_uint32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint32_fp32
// op(A') function: GB_tran__identity_uint32_fp32
// C type: uint32_t
// A type: float
// cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint32_fp32
(
uint32_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c |
void normalize_evectors(double complex *evecs);
void reigenvalues(double complex *A, double complex *Q, double complex *evals, double complex *evecs, int nA, int generalized)
{
/* Computes the eigenvalues and right eigenvectors of the complex matrix A which is NxN.
A . evecs = evals Q . evecs
    This is essentially a wrapper for the ZGEEV (or, when generalized is nonzero, ZGGEV) LAPACK routine.
    INPUTS:
    The matrices A and Q (both nA x nA) plus preallocated evals and evecs buffers; A and Q are copied internally and left unchanged.
OUTPUTS:
The eigenvalues are stored in the evals array.
The eigenvectors are stored in the ROWS of the evecs matrix
*/
int i,j;
char JOBVL = 'N';
char JOBVR = 'V';
int INFO;
int LDA = nA;
int LDB = nA;
int LDVL = nA;
int LDVR = nA;
int LWORK = 2*nA;
double *RWORK = (double *)malloc(sizeof(double)*8*nA);
double complex *CWORK = (double complex *)malloc(sizeof(double complex)*2*nA);
double complex *tA = (double complex *)malloc(sizeof(double complex)*nA*nA);
double complex *tQ = (double complex *)malloc(sizeof(double complex)*nA*nA);
double complex *evals_alpha = (double complex *)malloc(sizeof(double complex)*nA);
double complex *evals_beta = (double complex *)malloc(sizeof(double complex)*nA);
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) {
tA[i+nA*j] = A[j+nA*i];
tQ[i+nA*j] = Q[j+nA*i];
}
}
if (generalized) {
zggev_( &JOBVL, &JOBVR, &nA, tA, &LDA, tQ, &LDB, evals_alpha,evals_beta, NULL, &LDVL, evecs, &LDVR, CWORK, &LWORK, RWORK, &INFO );
for(i=0;i<nA;i++) {
if (cabs(evals_beta[i]) != 0) {
evals[i] = evals_alpha[i]/evals_beta[i];
}
}
}
else {
zgeev_( &JOBVL, &JOBVR, &nA, tA, &LDA, evals, tQ, &LDVL, evecs, &LDVR, CWORK, &LWORK, RWORK, &INFO );
}
// normalize_evectors(evecs);
SAFE_FREE(tA); SAFE_FREE(tQ);
SAFE_FREE(RWORK); SAFE_FREE(CWORK);
SAFE_FREE(evals_alpha);
SAFE_FREE(evals_beta);
return;
}
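/* Usage sketch (illustrative only): a standard, non-generalized 2x2
 * eigenproblem. Pass generalized = 0; Q must still be supplied as a buffer but
 * its contents are ignored in that case. A and Q are copied internally, so the
 * caller's matrices are preserved.
 *
 *   double complex A[4] = {2.0, 1.0,
 *                          1.0, 2.0};          // row-major 2x2
 *   double complex Q[4] = {1.0, 0.0,
 *                          0.0, 1.0};
 *   double complex evals[2], evecs[4];
 *   reigenvalues(A, Q, evals, evecs, 2, 0);    // rows of evecs hold the eigenvectors
 */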
void solve(double *A, double *B,int nA) {
int N = nA;
int NRHS = 1;
int LDA = nA;
int *IPIV = (int *)malloc(sizeof(int)*nA);
    int LDB = nA;
int INFO;
    dgesv_(&nA, &NRHS, A, &LDA, IPIV, B, &LDB, &INFO);
    SAFE_FREE(IPIV);
    return;
}
void matmat(double *A, double *B, double *C,
double alpha, double beta, int nA)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
A,B, and C are all matrices.
       This is essentially a wrapper for the DGEMM BLAS routine. The copies through
       `work` (assumed to be a global scratch buffer with leading dimension N declared
       elsewhere in this file) convert C between row-major and column-major order
       around the Fortran call.
    */
int i,j;
char TRANSA = 't';
char TRANSB = 't';
int m = nA;
int n = nA;
int k = nA;
int LDA = nA;
int LDB = nA;
int LDC = nA;
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) work[i+N*j] = C[j + nA*i];
}
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) C[i + nA*j] = work[i+N*j];
}
dgemm_(&TRANSA, &TRANSB, &m,&n,&k,&alpha,A,&LDA,B,&LDB,&beta,C,&LDC);
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) work[i+N*j] = C[j + nA*i];
}
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) C[i + nA*j] = work[i+N*j];
}
return;
}
void matmat_simple(double *A, double *B, double *C, double alpha, double beta,int n) {
    /* Naive triple-loop fallback: C = alpha * A.B + beta * C for row-major n x n matrices. */
    int i,j,k;
    double res;
    for(i=0;i<n;i++) {
        for(j=0;j<n;j++) {
            res = 0;
            for(k=0;k<n;k++) {
                res += A[k + i*n]*B[j + k*n];
            }
            C[j + i*n] = alpha * res + beta * C[j+i*n];
        }
    }
    return;
}
void cmatmat(double complex *A, double complex *B, double complex *C,
double complex alpha, double complex beta, int nA)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
A,B, and C are all matrices.
       This is essentially a wrapper for the ZGEMM BLAS routine. The copies through
       `cwork` (assumed to be a global complex scratch buffer with leading dimension N)
       convert C between row-major and column-major order around the Fortran call.
    */
int i,j;
char TRANSA = 't';
char TRANSB = 't';
int m = nA;
int n = nA;
int k = nA;
int LDA = nA;
int LDB = nA;
int LDC = nA;
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) cwork[i+N*j] = C[j + nA*i];
}
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) C[i + nA*j] = cwork[i+N*j];
}
zgemm_(&TRANSA, &TRANSB, &m,&n,&k,&alpha,A,&LDA,B,&LDB,&beta,C,&LDC);
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) cwork[i+N*j] = C[j + nA*i];
}
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) C[i + nA*j] = cwork[i+N*j];
}
return;
}
void matvec(double *A, double*B, double *C,
double alpha, double beta, int nB)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
A is a matrix, B and C are vectors.
       This is essentially a wrapper for the DGEMV BLAS routine
*/
char TRANS = 't';
int m = nB;
int n = nB;
int LDA = nB;
int INCX = 1;
int INCY = 1;
dgemv_(&TRANS, &m,&n,&alpha,A,&LDA,B,&INCX,&beta,C,&INCY);
return;
}
void solve(double *A, double *B,int nA) {
int N = nA;
int NRHS = 1;
int LDA = nA;
int *IPIV = (int *)malloc(sizeof(int)*nA);
int LDB = nA;
int INFO;
double *AT = (double *)malloc(sizeof(double)*nA*nA);
int i,j;
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) {
AT[i + nA*j] = A[j + nA*i];
}
}
dgesv_(&nA, &NRHS, AT, &LDA, IPIV, B, &LDB, &INFO);
    SAFE_FREE(AT);
    SAFE_FREE(IPIV);
return;
}
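/* Usage sketch (illustrative only): solve the 2x2 row-major system A x = b.
 * On return B holds the solution; A itself is preserved because the routine
 * factors a transposed copy.
 *
 *   double A[4] = {2.0, 1.0,
 *                  1.0, 3.0};
 *   double b[2] = {3.0, 5.0};
 *   solve(A, b, 2);   // b is overwritten with x
 */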
void csolve(double complex *A, double complex *B,int nA) {
int N = nA;
int NRHS = 1;
int LDA = nA;
int *IPIV = (int *)malloc(sizeof(int)*nA);
int LDB = nA;
int INFO;
double complex *AT = (double complex *)malloc(sizeof(double complex)*nA*nA);
int i,j;
for(i=0;i<nA;i++) {
for(j=0;j<nA;j++) {
AT[i + nA*j] = A[j + nA*i];
}
}
zgesv_(&nA, &NRHS, AT, &LDA, IPIV, B, &LDB, &INFO);
    SAFE_FREE(AT);
    SAFE_FREE(IPIV);
return;
}
void cmatvec(double complex *A, double complex *B, double complex *C,
double complex alpha, double complex beta, int nB)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
A is a matrix, B and C are vectors.
       This is essentially a wrapper for the ZGEMV BLAS routine
*/
char TRANS = 't';
int m = nB;
int n = nB;
int LDA = nB;
int INCX = 1;
int INCY = 1;
zgemv_(&TRANS, &m,&n,&alpha,A,&LDA,B,&INCX,&beta,C,&INCY);
return;
}
//
// void normalize_evectors(double complex *evecs) {
// /* Normalize the eigenvectors */
// /* Calculate the factor to normalize the disk eccentricity.
// Each planet eccentricity will then be normalized by the same factor.
// */
// int i,j,indx;
// double norm;
//
//
// #ifdef OPENMP
// #pragma omp parallel private(i,j,norm,indx) shared(evecs,nrows,ncols)
// #pragma omp for schedule(static)
// #endif
// for(i=0;i<nrows;i++) {
//
//
// norm = 0;
// #ifdef NORMALIZE_INT
// for(j=0;j<N;j++) {
// indx = j + ncols*i;
//
// norm += weights[j]*conj(evecs[indx])*evecs[indx];
//
// }
// norm = sqrt(norm);
// #else
// #ifdef NORMALIZE_MAX
// for(j=0;j<N;j++) {
// indx = j+ncols*i;
// // printf("%lg\t%lg\t%lg",norm,abs(evecs[indx]),fmax(norm,abs(evecs[indx])));
// norm = fmax(norm,abs(evecs[indx]));
// }
// if (norm == 0) norm = 1;
//
//
// #endif
// #endif
// for(j=0;j<ncols;j++) {
// indx = j + ncols*i;
// evecs[indx] /= norm;
// }
// }
//
// return;
//
//
// }
void normalize_evectors(double complex *evecs) {
/* Normalize the eigenvectors */
/* Calculate the factor to normalize the disk eccentricity.
Each planet eccentricity will then be normalized by the same factor.
*/
int i,j,indx;
double norm;
#ifdef OPENMP
#pragma omp parallel private(i,j,norm,indx) shared(evecs,nrows,ncols)
#pragma omp for schedule(static)
#endif
for(i=0;i<nrows;i++) {
norm = 0;
#ifdef NORMALIZE_NORM
for(j=0;j<N;j++) {
indx = j +ncols*i;
norm += conj(evecs[indx])*evecs[indx];
}
norm = sqrt(norm);
#else
#ifdef NORMALIZE_INT
for(j=0;j<N;j++) {
indx = j + ncols*i;
norm += weights[j]*conj(evecs[indx])*evecs[indx];
}
norm = sqrt(norm);
#else
#ifdef NORMALIZE_MAX
for(j=0;j<N;j++) {
indx = j+ncols*i;
// printf("%lg\t%lg\t%lg",norm,abs(evecs[indx]),fmax(norm,abs(evecs[indx])));
            norm = fmax(norm,cabs(evecs[indx]));
}
#endif
#endif
#endif
if (norm == 0) norm = 1;
for(j=0;j<ncols;j++) {
indx = j + ncols*i;
evecs[indx] /= norm;
}
}
return;
}
void matmat3(double *A, double *B, double *C, double alpha, double beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[3] + A[2]*B[6]);
C[1] = beta*C[1] + alpha*(A[0]*B[1] + A[1]*B[4] + A[2]*B[7]);
C[2] = beta*C[2] + alpha*(A[0]*B[2] + A[1]*B[5] + A[2]*B[8]);
C[3] = beta*C[3] + alpha*(A[3]*B[0] + A[4]*B[3] + A[5]*B[6]);
C[4] = beta*C[4] + alpha*(A[3]*B[1] + A[4]*B[4] + A[5]*B[7]);
C[5] = beta*C[5] + alpha*(A[3]*B[2] + A[4]*B[5] + A[5]*B[8]);
C[6] = beta*C[6] + alpha*(A[6]*B[0] + A[7]*B[3] + A[8]*B[6]);
C[7] = beta*C[7] + alpha*(A[6]*B[1] + A[7]*B[4] + A[8]*B[7]);
C[8] = beta*C[8] + alpha*(A[6]*B[2] + A[7]*B[5] + A[8]*B[8]);
return;
}
void matvec3(double *A, double *B, double *C, double alpha, double beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[1] + A[2]*B[2]);
C[1] = beta*C[1] + alpha*(A[3]*B[0] + A[4]*B[1] + A[5]*B[2]);
C[2] = beta*C[2] + alpha*(A[6]*B[0] + A[7]*B[1] + A[8]*B[2]);
return;
}
void matvec4(double *A, double *B, double *C, double alpha, double beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3]);
C[1] = beta*C[1] + alpha*(A[4]*B[0] + A[5]*B[1] + A[6]*B[2] + A[7]*B[3]);
C[2] = beta*C[2] + alpha*(A[8]*B[0] + A[9]*B[1] + A[10]*B[2] + A[11]*B[3]);
C[3] = beta*C[3] + alpha*(A[12]*B[0] + A[13]*B[1] + A[14]*B[2] + A[15]*B[3]);
return;
}
void matmat4(double *A, double *B, double *C, double alpha, double beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[4] + A[2]*B[8] + A[3]*B[12]);
C[1] = beta*C[1] + alpha*(A[0]*B[1] + A[1]*B[5] + A[2]*B[9] + A[3]*B[13]);
C[2] = beta*C[2] + alpha*(A[0]*B[2] + A[1]*B[6] + A[2]*B[10] + A[3]*B[14]);
C[3] = beta*C[3] + alpha*(A[0]*B[3] + A[1]*B[7] + A[2]*B[11] + A[3]*B[15]);
C[4] = beta*C[4] + alpha*(A[4]*B[0] + A[5]*B[4] + A[6]*B[8] + A[7]*B[12]);
C[5] = beta*C[5] + alpha*(A[4]*B[1] + A[5]*B[5] + A[6]*B[9] + A[7]*B[13]);
C[6] = beta*C[6] + alpha*(A[4]*B[2] + A[5]*B[6] + A[6]*B[10] + A[7]*B[14]);
C[7] = beta*C[7] + alpha*(A[4]*B[3] + A[5]*B[7] + A[6]*B[11] + A[7]*B[15]);
C[8] = beta*C[8] + alpha*(A[8]*B[0] + A[9]*B[4] + A[10]*B[8] + A[11]*B[12]);
C[9] = beta*C[9] + alpha*(A[8]*B[1] + A[9]*B[5] + A[10]*B[9] + A[11]*B[13]);
C[10] = beta*C[10] + alpha*(A[8]*B[2] + A[9]*B[6] + A[10]*B[10] + A[11]*B[14]);
C[11] = beta*C[11] + alpha*(A[8]*B[3] + A[9]*B[7] + A[10]*B[11] + A[11]*B[15]);
C[12] = beta*C[12] + alpha*(A[12]*B[0] + A[13]*B[4] + A[14]*B[8] + A[15]*B[12]);
C[13] = beta*C[13] + alpha*(A[12]*B[1] + A[13]*B[5] + A[14]*B[9] + A[15]*B[13]);
C[14] = beta*C[14] + alpha*(A[12]*B[2] + A[13]*B[6] + A[14]*B[10] + A[15]*B[14]);
C[15] = beta*C[15] + alpha*(A[12]*B[3] + A[13]*B[7] + A[14]*B[11] + A[15]*B[15]);
return;
}
void cmatmat3(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[3] + A[2]*B[6]);
C[1] = beta*C[1] + alpha*(A[0]*B[1] + A[1]*B[4] + A[2]*B[7]);
C[2] = beta*C[2] + alpha*(A[0]*B[2] + A[1]*B[5] + A[2]*B[8]);
C[3] = beta*C[3] + alpha*(A[3]*B[0] + A[4]*B[3] + A[5]*B[6]);
C[4] = beta*C[4] + alpha*(A[3]*B[1] + A[4]*B[4] + A[5]*B[7]);
C[5] = beta*C[5] + alpha*(A[3]*B[2] + A[4]*B[5] + A[5]*B[8]);
C[6] = beta*C[6] + alpha*(A[6]*B[0] + A[7]*B[3] + A[8]*B[6]);
C[7] = beta*C[7] + alpha*(A[6]*B[1] + A[7]*B[4] + A[8]*B[7]);
C[8] = beta*C[8] + alpha*(A[6]*B[2] + A[7]*B[5] + A[8]*B[8]);
return;
}
void cmatvec3(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[1] + A[2]*B[2]);
C[1] = beta*C[1] + alpha*(A[3]*B[0] + A[4]*B[1] + A[5]*B[2]);
C[2] = beta*C[2] + alpha*(A[6]*B[0] + A[7]*B[1] + A[8]*B[2]);
return;
}
void cmatvec4(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3]);
C[1] = beta*C[1] + alpha*(A[4]*B[0] + A[5]*B[1] + A[6]*B[2] + A[7]*B[3]);
C[2] = beta*C[2] + alpha*(A[8]*B[0] + A[9]*B[1] + A[10]*B[2] + A[11]*B[3]);
C[3] = beta*C[3] + alpha*(A[12]*B[0] + A[13]*B[1] + A[14]*B[2] + A[15]*B[3]);
return;
}
void cmatmat4(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[4] + A[2]*B[8] + A[3]*B[12]);
C[1] = beta*C[1] + alpha*(A[0]*B[1] + A[1]*B[5] + A[2]*B[9] + A[3]*B[13]);
C[2] = beta*C[2] + alpha*(A[0]*B[2] + A[1]*B[6] + A[2]*B[10] + A[3]*B[14]);
C[3] = beta*C[3] + alpha*(A[0]*B[3] + A[1]*B[7] + A[2]*B[11] + A[3]*B[15]);
C[4] = beta*C[4] + alpha*(A[4]*B[0] + A[5]*B[4] + A[6]*B[8] + A[7]*B[12]);
C[5] = beta*C[5] + alpha*(A[4]*B[1] + A[5]*B[5] + A[6]*B[9] + A[7]*B[13]);
C[6] = beta*C[6] + alpha*(A[4]*B[2] + A[5]*B[6] + A[6]*B[10] + A[7]*B[14]);
C[7] = beta*C[7] + alpha*(A[4]*B[3] + A[5]*B[7] + A[6]*B[11] + A[7]*B[15]);
C[8] = beta*C[8] + alpha*(A[8]*B[0] + A[9]*B[4] + A[10]*B[8] + A[11]*B[12]);
C[9] = beta*C[9] + alpha*(A[8]*B[1] + A[9]*B[5] + A[10]*B[9] + A[11]*B[13]);
C[10] = beta*C[10] + alpha*(A[8]*B[2] + A[9]*B[6] + A[10]*B[10] + A[11]*B[14]);
C[11] = beta*C[11] + alpha*(A[8]*B[3] + A[9]*B[7] + A[10]*B[11] + A[11]*B[15]);
C[12] = beta*C[12] + alpha*(A[12]*B[0] + A[13]*B[4] + A[14]*B[8] + A[15]*B[12]);
C[13] = beta*C[13] + alpha*(A[12]*B[1] + A[13]*B[5] + A[14]*B[9] + A[15]*B[13]);
C[14] = beta*C[14] + alpha*(A[12]*B[2] + A[13]*B[6] + A[14]*B[10] + A[15]*B[14]);
C[15] = beta*C[15] + alpha*(A[12]*B[3] + A[13]*B[7] + A[14]*B[11] + A[15]*B[15]);
return;
}
|
matVec-mpi.c | # include <math.h>
#ifdef USE_MPI
# include <mpi.h>
#endif
# include <stdio.h>
# include <stdlib.h>
# include <time.h>
#ifdef _OPENMP
# include <omp.h>
#else
# define omp_get_thread_num() 0
#endif
int main ( int argc, char *argv[] );
void timestamp ( );
/******************************************************************************/
int main ( int argc, char *argv[] )
/******************************************************************************/
/*
Purpose:
MAIN is the main program for MATVEC.
Discussion:
MATVEC uses MPI to compute a matrix-vector product b = A * x.
This is the simple self-scheduling version. Each worker is given
a copy of x, and then is fed one row of A. As soon as it computes
    b(I) = A(I,1:N)*x(1:N), it is given another row of A, unless
there are no more, in which case it is sent a "terminate" message.
Thus, a faster process will be given more work to do.
By using allocatable arrays, the amount of memory used has been
controlled. The master process allocates A and x, but the worker
processes only allocate enough memory for one row of A, and x.
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
11 October 2002
Author:
John Burkardt
Reference:
William Gropp, Ewing Lusk, Anthony Skjellum,
Using MPI: Portable Parallel Programming with the
Message-Passing Interface,
Second Edition,
MIT Press, 1999,
ISBN: 0262571323.
Snir, Otto, Huss-Lederman, Walker, Dongarra,
MPI - The Complete Reference,
Volume 1, The MPI Core,
second edition,
MIT Press, 1998.
*/
{
double *a;
double *a_row;
double ans;
double *b;
int dest;
int dummy;
int i;
int ierr;
int j;
int j_one;
int k;
int m;
int master = 0;
int my_id;
int n;
int num_procs;
int num_threads;
int num_rows;
int num_workers;
double pi = 3.141592653589793;
#ifdef USE_MPI
MPI_Status status;
#endif
int tag;
int tag_done;
double *x;
/*
Initialize MPI.
*/
#ifdef _OPENMP
  printf ("Using OpenMP library\n");
#endif
#ifdef USE_MPI
ierr = MPI_Init ( &argc, &argv );
if ( ierr != 0 )
{
printf ( "\n" );
printf ( "MATVEC_MPI - Fatal error!\n" );
printf ( " MPI_Init returns nonzero IERR.\n" );
exit ( 1 );
}
/*
Get this processor's ID.
*/
ierr = MPI_Comm_rank ( MPI_COMM_WORLD, &my_id );
/*
Get the number of processors.
*/
ierr = MPI_Comm_size ( MPI_COMM_WORLD, &num_procs );
#else
my_id = 0;
num_procs=1;
#endif
#ifdef _OPENMP
  omp_set_num_threads(16);
#pragma omp parallel
  {
    num_threads = omp_get_num_threads();
    printf("omp_get_num_threads(): %d\n", num_threads);
  }
#else
  num_threads = 1;
  printf("OpenMP not available. Setting num_threads: %d\n", num_threads);
#endif
if ( my_id == 0 )
{
timestamp ( );
printf ( "\n" );
printf ( "MATVEC - Master process:\n" );
printf ( " C version\n" );
printf ( " An MPI example program to compute\n" );
printf ( " a matrix-vector product b = A * x.\n" );
printf ( "\n" );
printf ( " Compiled on %s at %s.\n", __DATE__, __TIME__ );
printf ( "\n" );
printf ( " The number of processes is %d.\n", num_procs );
printf ( " The number of threads per process is %d.\n", num_threads);
}
printf ( "\n" );
printf ( "Process %d is active.\n", my_id );
m = 100;
n = 50;
tag_done = m + 1;
if ( my_id == 0 )
{
printf ( "\n" );
printf ( " The number of rows is %d.\n", m );
printf ( " The number of columns is %d.\n", n );
}
/*
The master process allocates and initializes A and X.
Because we are dynamically allocating A, we can't use 2D array double
indexing, so we have to figure out where we are on our own.
*/
if ( my_id == master )
{
a = ( double * ) malloc ( m * n * sizeof ( double ) );
x = ( double * ) malloc ( n * sizeof ( double ) );
b = ( double * ) malloc ( m * sizeof ( double ) );
/*
  Each entry of a[] is indexed directly from (i,j), so every iteration computes
  its own offset and the loop can safely run in parallel.
*/
#pragma omp parallel for private(j)
    for ( i = 1; i <= m; i++ )
    {
      for ( j = 1; j <= n; j++ )
      {
        a[(i-1)*n+(j-1)] = sqrt ( 2.0 / ( double ) ( n + 1 ) )
          * sin ( ( double ) ( i * j ) * pi / ( double ) ( n + 1 ) );
      }
    }
/*
X is specially chosen so that b = A * x is known in advance.
The value of b will be zero, except that entry J_ONE will be 1.
Pick any value of J_ONE between 1 and M.
*/
j_one = 17;
#pragma omp parallel for num_threads(16)
for ( i = 0; i < n; i++ )
{
x[i] = sqrt ( 2.0 / ( double ) ( n + 1 ) )
* sin ( ( double ) ( ( i + 1 ) * j_one ) * pi / ( double ) ( n + 1 ) );
printf("thread %d doing iteration %d \n", omp_get_thread_num(), i);
}
printf ( "\n" );
printf ( "MATVEC - Master process:\n" );
printf ( " Vector x:\n" );
printf ( "\n" );
for ( i = 0; i < n; i++ )
{
printf ( "%d %f\n", i, x[i] );
}
}
/*
Worker processes set aside room for one row of A, and for the
vector X.
*/
else
{
a_row = ( double * ) malloc ( n * sizeof ( double ) );
x = ( double * ) malloc ( n * sizeof ( double ) );
}
/*
Process 0 broadcasts the vector X to the other processes.
*/
#ifdef USE_MPI
ierr = MPI_Bcast ( x, n, MPI_DOUBLE, master, MPI_COMM_WORLD );
#endif
if ( my_id == master )
/*
Process 0 sends one row of A to all the other processes.
If we were using standard C 2D array storage, the entries of
the row would be contiguous; using pointers, we have ended up
in the same situation. As long as the entries are contiguous,
we can use a simple standard datatype with MPI_Send.
The situation would require a little more work if we tried
to send a column of data instead of a row.
*/
{
num_rows = 0;
for ( i = 1; i <= num_procs-1; i++ )
{
dest = i;
tag = num_rows;
k = num_rows * n;
#ifdef USE_MPI
ierr = MPI_Send ( a+k, n, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD );
#endif
num_rows = num_rows + 1;
}
num_workers = num_procs-1;
for ( ; ; )
{
#ifdef USE_MPI
ierr = MPI_Recv ( &ans, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
MPI_ANY_TAG, MPI_COMM_WORLD, &status );
#endif
#ifdef USE_MPI
tag = status.MPI_TAG;
#endif
b[tag] = ans;
if ( num_rows < m )
{
num_rows = num_rows + 1;
#ifdef USE_MPI
dest = status.MPI_SOURCE;
#endif
tag = num_rows;
k = num_rows * n;
#ifdef USE_MPI
ierr = MPI_Send ( a+k, n, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD );
#endif
}
else
{
num_workers = num_workers - 1;
dummy = 0;
#ifdef USE_MPI
dest = status.MPI_SOURCE;
#endif
tag = tag_done;
#ifdef USE_MPI
ierr = MPI_Send ( &dummy, 1, MPI_INT, dest, tag, MPI_COMM_WORLD );
#endif
if ( num_workers == 0 )
{
break;
}
}
}
free ( a );
free ( x );
}
/*
Each worker process repeatedly receives rows of A (with TAG indicating
which row it is), computes dot products A(I,1:N) * X(1:N) and returns
the result (and TAG), until receiving the "DONE" message.
*/
else
{
for ( ; ; )
{
#ifdef USE_MPI
ierr = MPI_Recv ( a_row, n, MPI_DOUBLE, master, MPI_ANY_TAG,
MPI_COMM_WORLD, &status );
tag = status.MPI_TAG;
#endif
if ( tag == tag_done )
{
printf ( " Process %d shutting down.\n", my_id );
break;
}
ans = 0.0;
#pragma omp parallel for reduction(+:ans)
for ( i = 0; i < n; i++ )
{
ans = ans + a_row[i] * x[i];
}
#ifdef USE_MPI
ierr = MPI_Send ( &ans, 1, MPI_DOUBLE, master, tag, MPI_COMM_WORLD );
#endif
}
free ( a_row );
free ( x );
}
/*
Print out the answer.
*/
if ( my_id == master )
{
printf ( "\n" );
printf ( "MATVEC - Master process:\n" );
printf ( " Product vector b = A * x\n" );
printf ( " (Should be zero, except for a 1 in entry %d)\n", j_one-1 );
printf ( "\n" );
for ( i = 0; i < m; i++ )
{
printf ( "%d %f\n", i, b[i] );
}
free ( b );
}
/*
Terminate MPI.
*/
#ifdef USE_MPI
ierr = MPI_Finalize ( );
#endif
/*
Terminate.
*/
if ( my_id == master )
{
printf ( "\n" );
printf ( "MATVEC - Master process:\n" );
printf ( " Normal end of execution.\n" );
printf ( "\n" );
timestamp ( );
}
return 0;
}
/******************************************************************************/
void timestamp ( )
/******************************************************************************/
/*
Purpose:
TIMESTAMP prints the current YMDHMS date as a time stamp.
Example:
31 May 2001 09:45:54 AM
Licensing:
This code is distributed under the GNU LGPL license.
Modified:
24 September 2003
Author:
John Burkardt
Parameters:
None
*/
{
# define TIME_SIZE 40
static char time_buffer[TIME_SIZE];
const struct tm *tm;
time_t now;
now = time ( NULL );
tm = localtime ( &now );
strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm );
printf ( "%s\n", time_buffer );
return;
# undef TIME_SIZE
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
  int height;   // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
  // Custom attributes(excludes required attributes(e.g. `channels`,
// `compression`, etc)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
  // ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
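// Example (a minimal illustrative sketch; "input.exr" is a placeholder name):
// typical use of LoadEXR from application code, with abbreviated error handling.
//
//   float *rgba = NULL;
//   int width, height;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) { fprintf(stderr, "EXR error: %s\n", err); FreeEXRErrorMessage(err); }
//   } else {
//     /* ... use rgba[4 * (y * width + x) + c] ... */
//     free(rgba);   // LoadEXR allocates the pixel buffer; the caller frees it
//   }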
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
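// Example (a minimal illustrative sketch): write a 3-channel float image as a
// half-float EXR. `rgb`, `w` and `h` are assumed to be supplied by the caller;
// "out.exr" is a placeholder filename.
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgb, w, h, 3 /* RGB */, 1 /* save as fp16 */,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err) {
//     fprintf(stderr, "Save EXR err: %s\n", err);
//     FreeEXRErrorMessage(err);
//   }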
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Free's internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Free's internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Free's error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
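// Example (a minimal illustrative sketch; "input.exr" is a placeholder name):
// the low-level loading flow described above -- parse the version and header
// first, then load the image and free everything.
//
//   EXRVersion exr_version;
//   EXRHeader exr_header;
//   EXRImage exr_image;
//   const char *err = NULL;
//   ParseEXRVersionFromFile(&exr_version, "input.exr");
//   InitEXRHeader(&exr_header);
//   if (ParseEXRHeaderFromFile(&exr_header, &exr_version, "input.exr", &err) ==
//       TINYEXR_SUCCESS) {
//     InitEXRImage(&exr_image);
//     if (LoadEXRImageFromFile(&exr_image, &exr_header, "input.exr", &err) ==
//         TINYEXR_SUCCESS) {
//       /* ... exr_image.images[c] holds per-channel pixel data ... */
//       FreeEXRImage(&exr_image);
//     }
//     FreeEXRHeader(&exr_header);
//   }
//   if (err) FreeEXRErrorMessage(err);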
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
//#include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occured in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
 failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
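// Editor's note: a minimal streaming-compression sketch (illustrative only,
// not compiled) tying together mz_deflateInit()/mz_deflate()/mz_deflateEnd()
// as documented above. Buffer names and sizes are hypothetical.
#if 0
static int example_deflate_whole_buffer(const unsigned char *src, size_t src_len,
                                        unsigned char *dst, size_t dst_cap,
                                        size_t *out_len) {
  mz_stream s;
  int status;
  memset(&s, 0, sizeof(s));          // zero-init: zalloc/zfree default to malloc/free
  s.next_in = src;
  s.avail_in = (unsigned int)src_len;
  s.next_out = dst;
  s.avail_out = (unsigned int)dst_cap;
  if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) != MZ_OK) return MZ_STREAM_ERROR;
  // With the whole input and output available we can drive the stream with
  // MZ_FINISH until it reports MZ_STREAM_END.
  status = mz_deflate(&s, MZ_FINISH);
  *out_len = (size_t)s.total_out;
  mz_deflateEnd(&s);
  return (status == MZ_STREAM_END) ? MZ_OK : status;
}
#endif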
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
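// Editor's note: illustrative sketch (not compiled) of the single-call path:
// size the destination with mz_compressBound(), then call mz_compress2().
#if 0
static int example_compress_once(const unsigned char *src, mz_ulong src_len) {
  mz_ulong dst_len = mz_compressBound(src_len);
  unsigned char *dst = (unsigned char *)malloc((size_t)dst_len);
  int status = dst ? mz_compress2(dst, &dst_len, src, src_len, MZ_BEST_COMPRESSION)
                   : MZ_MEM_ERROR;
  // On MZ_OK, dst_len now holds the actual compressed size.
  free(dst);
  return status;
}
#endif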
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
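// Editor's note: illustrative sketch (not compiled) of streaming decompression
// with a fixed-size output buffer, draining the output after each mz_inflate()
// call. The sink callback and buffer names are hypothetical.
#if 0
static int example_inflate_stream(const unsigned char *src, size_t src_len,
                                  int (*sink)(const void *buf, size_t len)) {
  unsigned char out[16384];
  mz_stream s;
  int status;
  memset(&s, 0, sizeof(s));
  s.next_in = src;
  s.avail_in = (unsigned int)src_len;
  if (mz_inflateInit(&s) != MZ_OK) return MZ_STREAM_ERROR;
  do {
    s.next_out = out;
    s.avail_out = sizeof(out);
    status = mz_inflate(&s, MZ_SYNC_FLUSH);
    if (!sink(out, sizeof(out) - s.avail_out)) { status = MZ_ERRNO; break; }
  } while (status == MZ_OK);  // MZ_OK: more output pending; anything else stops
  mz_inflateEnd(&s);
  return (status == MZ_STREAM_END) ? MZ_OK : status;
}
#endif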
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
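// Editor's note: illustrative sketch (not compiled) of single-call
// decompression. The caller must already know (or bound) the decompressed
// size; dst_cap below is a hypothetical capacity supplied by the caller.
#if 0
static int example_uncompress_once(unsigned char *dst, mz_ulong dst_cap,
                                   const unsigned char *src, mz_ulong src_len) {
  mz_ulong dst_len = dst_cap;  // in: capacity, out: actual decompressed size
  return mz_uncompress(dst, &dst_len, src, src_len);
  // MZ_OK on success, MZ_BUF_ERROR if dst_cap was too small.
}
#endif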
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
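// Editor's note: illustrative sketch (not compiled) of the typical ZIP reading
// sequence declared above: init from a file (requires MINIZ_NO_STDIO to be
// undefined), iterate entries, extract one to a heap block, then end. The
// archive path is hypothetical.
#if 0
static mz_bool example_dump_zip(const char *zip_path) {
  mz_zip_archive zip;
  mz_uint i, n;
  memset(&zip, 0, sizeof(zip));  // the struct must be zeroed before init
  if (!mz_zip_reader_init_file(&zip, zip_path, 0)) return MZ_FALSE;
  n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    if (!mz_zip_reader_file_stat(&zip, i, &st)) break;
    if (!mz_zip_reader_is_file_a_directory(&zip, i)) {
      size_t size = 0;
      void *p = mz_zip_reader_extract_to_heap(&zip, i, &size, 0);
      // ... use st.m_filename, p, size here ...
      mz_free(p);
    }
  }
  return mz_zip_reader_end(&zip);
}
#endif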
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
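// Editor's note: illustrative sketch (not compiled) of creating an archive with
// the writer API above: init, add a memory buffer, finalize, end. The path and
// entry name are hypothetical; remember the archive is only valid once
// finalized.
#if 0
static mz_bool example_write_zip(const char *zip_path, const void *data, size_t len) {
  mz_zip_archive zip;
  mz_bool ok;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_file(&zip, zip_path, 0)) return MZ_FALSE;
  ok = mz_zip_writer_add_mem(&zip, "entry.txt", data, len, MZ_DEFAULT_COMPRESSION);
  ok = ok && mz_zip_writer_finalize_archive(&zip);
  mz_zip_writer_end(&zip);  // frees allocations, closes the output file
  return ok;
}
#endif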
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
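// Editor's note: illustrative sketch (not compiled) of the two high-level
// helpers above: append a blob to an on-disk archive, then read it back into a
// heap block. The path and entry name are hypothetical.
#if 0
static mz_bool example_round_trip(const char *zip_path, const void *blob, size_t len) {
  size_t out_size = 0;
  void *p;
  if (!mz_zip_add_mem_to_archive_file_in_place(zip_path, "blob.bin", blob, len,
                                               NULL, 0, MZ_BEST_SPEED))
    return MZ_FALSE;
  p = mz_zip_extract_archive_file_to_heap(zip_path, "blob.bin", &out_size, 0);
  if (!p) return MZ_FALSE;
  // ... compare p/out_size against blob/len here ...
  mz_free(p);
  return MZ_TRUE;
}
#endif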
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
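// Editor's note: illustrative sketch (not compiled) of the heap-based helper
// above on a zlib-wrapped stream; the caller owns the result and must release
// it with mz_free().
#if 0
static void *example_tinfl_to_heap(const void *src, size_t src_len, size_t *out_len) {
  // TINFL_FLAG_PARSE_ZLIB_HEADER: src has a zlib header and adler-32 footer.
  return tinfl_decompress_mem_to_heap(src, src_len, out_len,
                                      TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif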
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must mz_free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
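// Editor's note: illustrative sketch (not compiled) of writing a raw RGB image
// out as a PNG file via the helper above. The file name is hypothetical; the
// returned heap block must be released with mz_free().
#if 0
static mz_bool example_save_png(const char *path, const void *rgb, int w, int h) {
  size_t png_len = 0;
  void *png = tdefl_write_image_to_png_file_in_memory(rgb, w, h, 3, &png_len);
  FILE *f;
  if (!png) return MZ_FALSE;
  f = fopen(path, "wb");
  if (f) {
    fwrite(png, 1, png_len, f);
    fclose(f);
  }
  mz_free(png);
  return f != NULL;
}
#endif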
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at a
// time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output is supplied to this callback,
// and the user should call the tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should instead always call the
// tdefl_compress() API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
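// Editor's note: illustrative sketch (not compiled) of driving the low-level
// compressor directly with tdefl_init()/tdefl_compress() in one-shot mode
// (whole input and output buffers available). The flag combination shown is
// one reasonable choice, not the only one.
#if 0
static tdefl_status example_tdefl_one_shot(const void *src, size_t src_len,
                                           void *dst, size_t *dst_len) {
  // tdefl_compressor is large (hundreds of KB), so heap-allocate it.
  tdefl_compressor *d = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
  tdefl_status st;
  size_t in_len = src_len;
  if (!d) return TDEFL_STATUS_BAD_PARAM;  // no memory (reusing the enum for brevity)
  // NULL callback: we must use tdefl_compress(), not tdefl_compress_buffer().
  st = tdefl_init(d, NULL, NULL, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (st == TDEFL_STATUS_OKAY)
    st = tdefl_compress(d, src, &in_len, dst, dst_len, TDEFL_FINISH);
  free(d);
  return st;  // TDEFL_STATUS_DONE means all input was consumed and flushed
}
#endif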
// Can't use tdefl_create_comp_flags_from_zip_params() if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really overly conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a Huffman code using whatever bits are currently present in the bit
// buffer. If this fails, it reads another byte, and tries again until it
// succeeds or until the bit buffer contains >=15 bits (deflate's max. Huffman
// code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read beyond the final byte of the deflate stream. (In other words,
// when this macro wants to read another byte from the input, it REALLY needs
// another byte in order to fully decode the next Huffman code.) Handling this
// properly is particularly important on raw deflate (non-zlib) streams, which
// aren't followed by a byte-aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
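// Main literal/length decode loop. The slow path below decodes one symbol at
// a time whenever the input or output buffer is nearly exhausted; otherwise
// the fast path keeps the bit buffer topped up and decodes up to two symbols
// (typically literals) per iteration straight into the output buffer.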
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
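// Update the running Adler-32 over the bytes just produced. Work is done in
// blocks of 5552 bytes, the largest count for which the 16-bit partial sums
// s1/s2 cannot overflow a 32-bit accumulator before the mod-65521 reduction.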
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
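// A minimal usage sketch for the heap helper above, kept inside an #if 0
// block so it is never compiled into the library. It assumes pComp holds a
// complete zlib-wrapped stream; drop TINFL_FLAG_PARSE_ZLIB_HEADER when the
// data is raw deflate.
#if 0
static void *example_inflate(const void *pComp, size_t comp_len,
                             size_t *pOut_len) {
  // Grows the output buffer on the heap as needed and returns NULL on any
  // failure; the returned buffer must eventually be released with MZ_FREE().
  return tinfl_decompress_mem_to_heap(pComp, comp_len, pOut_len,
                                      TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif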
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
// ------------------- Low-level Compression (independent of all decompression APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
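// Two-pass (low byte, then high byte) counting radix sort of the symbol
// frequencies: each pass builds a 256-entry histogram, turns it into running
// offsets, and scatters the entries into the scratch array. The high-byte
// pass is skipped entirely when every frequency already fits in eight bits.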
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
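// Roughly: A[] arrives sorted by frequency, the first loop melds the two
// smallest available weights (leaves or internal nodes) into internal nodes,
// the next loop converts parent indices into node depths, and the final loop
// hands those depths out as the optimal (minimum-redundancy) code lengths.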
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
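// tdefl_huffman_enforce_max_code_size() clamps any code lengths longer than
// max_code_size, then repairs the length histogram until the Kraft sum
// (the total of 2^(max_code_size - len) over all codes) is exactly
// 1 << max_code_size again, i.e. until the lengths describe a complete,
// decodable prefix code.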
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
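// Canonical code assignment: codes of each length are numbered consecutively
// starting at next_code[len], then bit-reversed because the bit buffer is
// written LSB-first. As a small worked sketch (not data from this file),
// lengths {2,2,3,3,3} give next_code[2]=0 and next_code[3]=4, so the codes
// are 00, 01, 100, 101, 110 before reversal.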
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
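// The two RLE macros above emit the DEFLATE code-length alphabet: symbol 16
// repeats the previous code size 3-6 times, symbol 17 encodes a run of 3-10
// zeros, and symbol 18 encodes a run of 11-138 zeros; shorter runs are simply
// written out literally.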
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmovs instead of jmps.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
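// tdefl_flush_block() finishes the current block: it emits the final-block
// bit, writes the zlib header before the first block when requested, lets
// tdefl_compress_block() produce a static or dynamic Huffman block, falls
// back to a stored (raw) block if compression would expand the data, appends
// the Adler-32 (or an empty sync block) when flushing, and finally hands the
// bytes to the output callback or the caller's buffer. A negative return
// signals a failed callback; a positive one means output is still pending in
// the internal flush buffer.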
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
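// tdefl_find_match() walks the hash chain stored in m_next[] for the current
// position, probing at most m_max_probes entries. Each probe first checks the
// two dictionary bytes that would have to match at the end of the current
// best match (a cheap rejection test) before falling through to the
// word-at-a-time comparison loop.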
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
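// The two helpers below append to the LZ code buffer shared by both parsers:
// every group of eight literal/match decisions is preceded by a flags byte
// (bit set = match), a literal costs one byte, and a match costs three bytes
// (length - TDEFL_MIN_MATCH_LEN followed by distance - 1, little-endian). The
// Huffman symbol histograms are updated at the same time so the next block's
// code tables are built from real statistics.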
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
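// Illustrative usage sketch (not part of the library; kept compiled out).
// Shows one way the two memory helpers above could be called. The probe count
// (128) and the zlib-header flag are arbitrary choices for this example.
#if 0
static void example_tdefl_mem_helpers(const void *pSrc, size_t src_len) {
  size_t comp_len = 0;
  // Compress into a heap block that the helper allocates and returns.
  void *pComp = tdefl_compress_mem_to_heap(pSrc, src_len, &comp_len,
                                           128 | TDEFL_WRITE_ZLIB_HEADER);
  if (pComp) {
    // ... consume comp_len bytes at pComp ...
    MZ_FREE(pComp);  // matches the MZ_MALLOC/MZ_REALLOC used by the helper
  }
  // Or compress into a caller-owned, fixed-size buffer.
  {
    mz_uint8 out_buf[1024];
    size_t n = tdefl_compress_mem_to_mem(out_buf, sizeof(out_buf), pSrc,
                                         src_len, 128);
    (void)n;  // 0 means compression failed or the output did not fit
  }
}
#endif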
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
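// Illustrative sketch (compiled out): mapping zlib-style parameters onto the
// tdefl flag word and handing it to tdefl_init(). MZ_DEFAULT_WINDOW_BITS and
// MZ_DEFAULT_STRATEGY are assumed to come from the zlib-compatible API
// elsewhere in this file; level 9 is an arbitrary example value.
#if 0
static void example_comp_flags(tdefl_compressor *pComp,
                               tdefl_put_buf_func_ptr pPut, void *pUser) {
  mz_uint comp_flags = tdefl_create_comp_flags_from_zip_params(
      9, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
  // The flag word carries the probe count, the greedy-parsing bit, and the
  // zlib-header bit derived from window_bits > 0.
  tdefl_init(pComp, pPut, pUser, (int)comp_flags);
}
#endif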
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but
  // we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
  // were #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
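// Illustrative sketch (compiled out): dumping a raw RGBA image to a PNG file
// on disk via the in-memory writer above. The output file name and the 4
// channels per pixel are assumptions made only for this example.
#if 0
static int example_write_png(const void *pRGBA, int w, int h) {
  size_t png_len = 0;
  int ok = 0;
  void *pPNG =
      tdefl_write_image_to_png_file_in_memory(pRGBA, w, h, 4, &png_len);
  if (!pPNG) return 0;
  {
    FILE *pFile = fopen("out.png", "wb");
    if (pFile) {
      ok = (fwrite(pPNG, 1, png_len, pFile) == png_len);
      fclose(pFile);
    }
  }
  MZ_FREE(pPNG);  // the buffer was grown with MZ_REALLOC
  return ok;
}
#endif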
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#error "No arvhive APIs"
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
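// Illustrative sketch (compiled out): because the records are handled as raw
// little-endian bytes rather than structs, individual fields are read with
// MZ_READ_LE16/MZ_READ_LE32 at the offsets enumerated above. pCdh is assumed
// to point at one central directory header inside the in-memory central dir.
#if 0
static void example_read_cdh_fields(const mz_uint8 *pCdh) {
  mz_uint method = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_METHOD_OFS);
  mz_uint comp_size = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  mz_uint file_crc = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_CRC32_OFS);
  (void)method;
  (void)comp_size;
  (void)file_crc;
}
#endif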
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
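// Worked example of the packing above (illustrative only): 2016-03-01 12:30:42
// packs as
//   DOS date = ((2016 - 1980) << 9) | (3 << 5) | 1  = 0x4861
//   DOS time = (12 << 11) | (30 << 5) | (42 >> 1)   = 0x63D5
// Note the 2-second resolution: bits 0-4 of the DOS time store seconds / 2.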
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject files which are too small, and check the first
// 4 bytes of the file to make sure a local header is there.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
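// Illustrative sketch (compiled out, assumes MINIZ_NO_STDIO is not defined):
// opening an archive from disk and walking its central directory with the
// stat helper above. "archive.zip" is a placeholder name.
#if 0
static void example_list_archive(void) {
  mz_zip_archive zip;
  mz_uint i;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
  for (i = 0; i < mz_zip_reader_get_num_files(&zip); i++) {
    mz_zip_archive_file_stat st;
    if (mz_zip_reader_file_stat(&zip, i, &st))
      printf("%s: %u bytes (%u compressed)\n", st.m_filename,
             (mz_uint)st.m_uncomp_size, (mz_uint)st.m_comp_size);
  }
  mz_zip_reader_end(&zip);
}
#endif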
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
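// Illustrative sketch (compiled out): the _no_alloc variants let the caller
// supply both the output buffer and the intermediate read buffer, so nothing
// is heap-allocated during extraction. The buffer sizes are arbitrary here;
// the output buffer must be large enough for the uncompressed file.
#if 0
static mz_bool example_extract_no_alloc(mz_zip_archive *pZip,
                                        const char *pName) {
  static mz_uint8 out_buf[1 << 20];
  static mz_uint8 read_buf[64 * 1024];
  return mz_zip_reader_extract_file_to_mem_no_alloc(
      pZip, pName, out_buf, sizeof(out_buf), 0, read_buf, sizeof(read_buf));
}
#endif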
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return MZ_FALSE;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
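// Illustrative sketch (compiled out): extracting one entry by name into a
// heap block the reader allocates. "readme.txt" is a placeholder name; the
// returned block is released with the archive's free callback (MZ_FREE by
// default).
#if 0
static void example_extract_to_heap(mz_zip_archive *pZip) {
  size_t size = 0;
  void *p = mz_zip_reader_extract_file_to_heap(pZip, "readme.txt", &size, 0);
  if (p) {
    // ... use size bytes at p ...
    pZip->m_pFree(pZip->m_pAlloc_opaque, p);
  }
}
#endif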
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
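// Illustrative sketch (compiled out): streaming an entry out through a
// caller-supplied write callback. This one just counts bytes; a real callback
// could write to a socket, a file, or a hash instead.
#if 0
static size_t example_count_bytes_callback(void *pOpaque, mz_uint64 ofs,
                                           const void *pBuf, size_t n) {
  (void)ofs;
  (void)pBuf;
  *(mz_uint64 *)pOpaque += n;
  return n;  // returning less than n aborts the extraction
}
static mz_bool example_extract_via_callback(mz_zip_archive *pZip,
                                            mz_uint file_index) {
  mz_uint64 total = 0;
  return mz_zip_reader_extract_to_callback(
      pZip, file_index, example_count_bytes_callback, &total, 0);
}
#endif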
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
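// Illustrative sketch (compiled out): building a new archive entirely in
// memory. mz_zip_writer_add_mem(), mz_zip_writer_finalize_heap_archive() and
// mz_zip_writer_end() are assumed to be available later in this file, as in
// stock miniz; "hello.txt" and the 64 KB initial allocation are arbitrary.
#if 0
static void *example_build_archive_in_memory(size_t *pOut_size) {
  mz_zip_archive zip;
  void *pBuf = NULL;
  const char *pText = "hello";
  memset(&zip, 0, sizeof(zip));
  *pOut_size = 0;
  if (!mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) return NULL;
  if (mz_zip_writer_add_mem(&zip, "hello.txt", pText, strlen(pText),
                            MZ_DEFAULT_LEVEL))
    mz_zip_writer_finalize_heap_archive(&zip, &pBuf, pOut_size);
  mz_zip_writer_end(&zip);
  return pBuf;  // caller frees with the archive's allocator (MZ_FREE default)
}
#endif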
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
// No sense in trying to write to an archive that's already at the support max
// size
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's a heap block that we can
    // resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
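// Minimal usage sketch (illustrative only, excluded from the build): building
// a zip archive entirely in memory. It assumes mz_zip_writer_init_heap(),
// part of miniz's writer API declared earlier in this file; error handling is
// intentionally simple.
#if 0
static mz_bool example_build_zip_in_memory(void **ppZip_data, size_t *pZip_size) {
  mz_zip_archive zip;
  const char *pText = "hello from miniz";
  MZ_CLEAR_OBJ(zip);
  if (!mz_zip_writer_init_heap(&zip, 0, 0)) return MZ_FALSE;
  if (!mz_zip_writer_add_mem(&zip, "hello.txt", pText, strlen(pText),
                             MZ_DEFAULT_LEVEL)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  // Hands ownership of the finalized heap block to the caller; free it with
  // the same allocator the archive used (by default plain malloc/free).
  if (!mz_zip_writer_finalize_heap_archive(&zip, ppZip_data, pZip_size)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif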
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
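// Example (illustrative only): adding a single in-memory buffer to "out.zip",
// creating the archive if it does not exist yet.
//
//   const char *pData = "Hello, world!";
//   mz_zip_add_mem_to_archive_file_in_place("out.zip", "hello.txt", pData,
//                                           strlen(pData), NULL, 0,
//                                           MZ_DEFAULT_LEVEL);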
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace miniz
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro.
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
    // Un-bias the single-precision exponent, then re-bias it for half precision
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
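// Worked example (for reference only): the half-precision bit pattern 0x3C00
// encodes 1.0 (sign 0, exponent 15, mantissa 0). half_to_float() maps it to
// the single-precision pattern 0x3F800000 (1.0f), and float_to_half_full()
// maps 1.0f back to 0x3C00.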
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
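// An EXR header attribute is serialized as:
//   <name>\0 <type>\0 <uint32 little-endian data size> <data bytes>
// ReadAttribute() below parses one attribute in this form and reports the
// number of bytes consumed through marker_size.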
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
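// The "channels" attribute (type "chlist") is a sequence of channel records,
//   <name>\0 <int pixel_type> <uchar p_linear> <uchar reserved[3]>
//   <int x_sampling> <int y_sampling>
// terminated by a single 0 byte where the next name would start.
// ReadChannelInfo() and WriteChannelInfo() below parse and emit this layout.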
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply EXR-specific preprocessing, adapted from OpenEXR's
  // ImfZipCompressor.cpp:
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
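  // Each byte is replaced by its delta from the previous byte, biased by 128
  // (the extra +256 keeps the intermediate non-negative before the cast
  // truncates modulo 256). DecompressZip() undoes this with
  // t[0] = t[-1] + t[0] - 128, so the round trip is exact modulo 256.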
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
  // Apply EXR-specific postprocessing, adapted from OpenEXR's
  // ImfZipCompressor.cpp:
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
      // Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
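// Worked example (for reference only): the 6-byte input "AAAABC" compresses to
// the 5-byte stream { 3, 'A', -2, 'B', 'C' }. A non-negative count byte n
// means "repeat the following byte n + 1 times"; a negative count byte -n
// means "copy the next n bytes verbatim". rleUncompress() reconstructs
// "AAAABC" from that stream.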
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply EXR-specific preprocessing, adapted from OpenEXR's
  // ImfRleCompressor.cpp:
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be at most (src_size * 3) / 2.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
  // Apply EXR-specific postprocessing, adapted from OpenEXR's
  // ImfRleCompressor.cpp:
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
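// Worked example (for reference only): wenc14(10, 6) stores the average
// l = (10 + 6) >> 1 = 8 and the difference h = 10 - 6 = 4; wdec14(8, 4)
// recovers a = 8 + (4 & 1) + (4 >> 1) = 10 and b = 10 - 4 = 6.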
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
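// Worked example (for reference only): with A_OFFSET = 0x8000 and
// MOD_MASK = 0xFFFF, wenc16(10, 6) yields l = (0x800A + 6) >> 1 = 0x4008 and
// h = (0x800A - 6) & 0xFFFF = 0x8004; wdec16(0x4008, 0x8004) recovers
// b = (0x4008 - 0x4002) & 0xFFFF = 6 and a = (0x8004 + 6 - 0x8000) & 0xFFFF = 10.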
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on the smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec {  // short code       long code
                 //-------------------------------
  int len : 8;   // code length      0
  int lit : 24;  // lit              p size
  int *p;        // 0                lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
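// Example (illustrative): a run of 4 zero-length entries is packed as the
// single 6-bit value 61 (= SHORT_ZEROCODE_RUN + 4 - 2), while a run of 10
// zeroes is packed as the 6-bit value 63 (LONG_ZEROCODE_RUN) followed by
// 10 - SHORTEST_LONG_RUN = 4 in 8 bits. A single zero is emitted literally
// as a 6-bit 0.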
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
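// Note: a short code c of length l fills all 2^(HUF_DECBITS - l) primary
// entries whose top l bits equal c. With HUF_DECBITS = 14, a 6-bit code
// therefore occupies 256 consecutive table slots, which is what makes
// single-lookup decoding of short codes possible.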
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
// Output a run of runCount + 1 instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
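// Example (illustrative): with a 10-bit symbol code, an 8-bit run-length code
// and runCount = 3 (i.e. 4 occurrences in total), the run form costs
// 10 + 8 + 8 = 26 bits versus 10 * 4 = 40 bits for the explicit form, so the
// run form is chosen (26 < 10 * runCount = 30 satisfies the test above).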
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
const int ni, // i : input buffer size (number of 16-bit values)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (number of 16-bit values)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
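// Layout written by hufCompress() (and read back by hufUncompress() below):
// bytes [0..3] = im, [4..7] = iM, [8..11] = packed table length,
// [12..15] = nBits, [16..19] = reserved (zero); the packed encoding table
// starts at byte 20, immediately followed by the Huffman-coded data.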
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
if (nCompressed == 0) {
if (raw->size() != 0) return false;
return false;
}
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be runnable on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
&freq.at(0));
{
if (nBits > 8 * (nCompressed - (ptr - compressed))) {
return false;
}
hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
raw->data());
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
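// Example (illustrative): if the pixel data contain only the values
// {0, 100, 1000}, bitmapFromData() sets bits for 100 and 1000 (zero is
// implicit), forwardLutFromBitmap() maps 0 -> 0, 100 -> 1, 1000 -> 2
// (all other values -> 0) and returns maxValue = 2, and
// reverseLutFromBitmap() restores 0 -> 0, 1 -> 100, 2 -> 1000.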
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is a multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header (4 bytes), then huff data. Initialize the length header with
// zero, then fill it in later with `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
// Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
std::vector<unsigned short> tmpBuffer(tmpBufSize);
if (!hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer)) {
return false;
}
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0f;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size = static_cast<size_t>(dst_width) *
static_cast<size_t>(dst_num_lines) *
static_cast<size_t>(num_channels) * sizeof(float);
if (uncompressed_size == src_size) {
// Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is not enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// address may not be aligned. use byte-wise copy for safety. #76
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
// Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static void DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
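// Example (illustrative): with data_width = 100 and tile_size_x = 32, the
// tiles at tile_offset_x = 0, 1, 2 are 32 pixels wide, while the boundary
// tile at tile_offset_x = 3 covers columns 96..99 and gets
// (*width) = 100 - 96 = 4.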
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
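// Example (illustrative): for three channels stored as {HALF, FLOAT, UINT},
// the channel offsets are {0, 2, 6} (bytes per pixel; each is later multiplied
// by the scanline width when indexing decompressed data) and pixel_data_size
// becomes 2 + 4 + 4 = 10 bytes per pixel.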
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
      // Custom attribute (up to TINYEXR_MAX_CUSTOM_ATTRIBUTES entries).
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy the pointer; the attribute value buffer is not duplicated.
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
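  // Scanlines per chunk by compression type: NONE, RLE and ZIPS store one
  // scanline per chunk, ZIP stores 16 and PIZ stores 32; ZFP (a tinyexr
  // extension) is treated like ZIP with 16 scanlines per chunk.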
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
  // Do not allow too large data_width and data_height (likely an invalid header).
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
exr_header->tile_size_x, exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
if (err) {
(*err) += "Insufficient data size.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
if (err) {
(*err) += "Insufficient data length.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
exr_image->num_tiles = static_cast<int>(num_tiles);
}
} else { // scanline format
    // Don't allow too large an image (256GB * pixel_data_size or more).
    // Workaround for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown = sizeof(void*) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown ) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Scanline number outside a heuristic range
          // (|line_no| > 2<<20 = 2097152); assume the data is invalid.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example `data_len
// < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
if (err) {
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
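// Rebuilds the scanline offset table by walking the chunks sequentially:
// each chunk begins with a 4-byte y coordinate followed by a 4-byte data
// length, so the next chunk starts at (current offset + data_len + 8).
// Used by DecodeEXRImage() when the offset table stored in the file
// contains zero/invalid entries.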
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
tinyexr::SetErrorMessage("data width or data height is negative.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
  // Do not allow too large data_width and data_height (likely an invalid header).
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
  // If line offsets are invalid, we try to reconstruct them.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
}
return ret;
}
}
} // namespace tinyexr
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
FreeEXRHeader(&exr_header);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
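// Illustrative caller-side sketch for LoadEXR() (not part of the library;
// the file name and the error handling shown are placeholders, and <cstdio>
// is assumed to be available):
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &width, &height, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "LoadEXR failed: %s\n", err);
//     }
//   } else {
//     // `rgba` holds width * height RGBA float pixels; the caller owns the
//     // buffer and must free() it.
//     free(rgba);
//   }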
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_HEADER;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
  // Transfer the `tiled` flag from the version header.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Failed to parse EXR version", err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
  if (filesize < 16) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
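  // From here on the output follows the scanline EXR layout this function has
  // been assembling: magic number, version field, attributes, the terminating
  // null byte just written, then the chunk offset table and finally the
  // per-block pixel data chunks.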
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
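  // Per-scanline layout of the uncompressed block buffer built below: each
  // scanline occupies width * pixel_data_size bytes, with every channel's
  // samples stored as a contiguous plane; channel c's plane starts at byte
  // offset channel_offset_list[c] * width within that scanline.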
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use a signed int loop counter since some OpenMP compilers do not allow
// unsigned types in `parallel for`.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
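// Note on the convention above: SaveEXRImageToMemory() returns the total
// number of bytes written (0 on error) and allocates *memory_out with
// malloc(), so the caller is responsible for free()ing it.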
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
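// Illustrative caller-side sketch for SaveEXRImageToFile() (a minimal
// single-channel outline; the buffer, file name and channel name are
// placeholders chosen for the example, and cleanup of the malloc()ed header
// fields is omitted for brevity):
//
//   EXRHeader header;
//   EXRImage image;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//
//   float *plane = /* width * height float samples */;
//   float *image_ptr[1] = {plane};
//   image.images = reinterpret_cast<unsigned char **>(image_ptr);
//   image.width = width;
//   image.height = height;
//   image.num_channels = 1;
//
//   header.num_channels = 1;
//   header.channels =
//       static_cast<EXRChannelInfo *>(malloc(sizeof(EXRChannelInfo)));
//   strncpy(header.channels[0].name, "R", 255);
//   header.channels[0].name[255] = '\0';
//   header.pixel_types = static_cast<int *>(malloc(sizeof(int)));
//   header.requested_pixel_types = static_cast<int *>(malloc(sizeof(int)));
//   header.pixel_types[0] = TINYEXR_PIXELTYPE_FLOAT;           // input data
//   header.requested_pixel_types[0] = TINYEXR_PIXELTYPE_FLOAT; // stored type
//   header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
//
//   const char *err = NULL;
//   int ret = SaveEXRImageToFile(&image, &header, "out.exr", &err);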
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((0 != errcode) || (!fp)) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 8, 0, 0] (version 2 with the deep-data flag set)
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
        // Note: returning false (0) here would read as TINYEXR_SUCCESS, so
        // report an explicit error code instead.
        return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
          return TINYEXR_ERROR_INVALID_DATA;  // was `return false`, which maps to TINYEXR_SUCCESS
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
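// Hedged sketch (not part of the tinyexr API): the Init*/Free* pairs above are
// meant to bracket every header/image object. On a freshly initialized object
// all pointers are NULL, so the Free* calls below are safe no-ops.
static int ExampleInitFreeSketch(void) {
  EXRHeader header;
  EXRImage image;
  InitEXRHeader(&header);
  InitEXRImage(&image);
  // ... a loader (e.g. ParseEXRHeaderFrom* / LoadEXR*) would populate these ...
  int ret_image = FreeEXRImage(&image);
  int ret_header = FreeEXRHeader(&header);
  return (ret_image == TINYEXR_SUCCESS && ret_header == TINYEXR_SUCCESS)
             ? TINYEXR_SUCCESS
             : TINYEXR_ERROR_INVALID_ARGUMENT;
}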
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
    // transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
if (version == NULL) {
return TINYEXR_SUCCESS; // May OK
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
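// Hedged usage sketch (not part of the tinyexr API): parse the version block
// first, branch on its flags, then parse a single-part header with the
// functions defined above. The filename argument is a placeholder.
static int ExampleParseSketch(const char *filename) {
  EXRVersion version;
  int ret = ParseEXRVersionFromFile(&version, filename);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }
  if (version.multipart || version.non_image) {
    // Multi-part and deep files need the *Multipart* / deep APIs instead.
    return TINYEXR_ERROR_INVALID_DATA;
  }
  EXRHeader header;
  InitEXRHeader(&header);
  const char *err = NULL;
  ret = ParseEXRHeaderFromFile(&header, &version, filename, &err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRErrorMessage(err);  // no-op when err is NULL
    return ret;
  }
  FreeEXRHeader(&header);
  return TINYEXR_SUCCESS;
}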
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In multipart image, There is 'part number' before chunk data.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// EXR spec says 'part number' is 'unsigned long' but actually this is
// 'unsigned int(4 bytes)' in OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
    // First check that the 'part number' is identical to 'i'.
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
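// Hedged usage sketch (not part of the tinyexr API): the typical multi-part
// call sequence using the functions defined above. Error handling is
// abbreviated and the filename argument is a placeholder.
static int ExampleLoadMultipartSketch(const char *filename) {
  EXRVersion version;
  if (ParseEXRVersionFromFile(&version, filename) != TINYEXR_SUCCESS ||
      !version.multipart) {
    return TINYEXR_ERROR_INVALID_DATA;
  }
  EXRHeader **headers = NULL;
  int num_parts = 0;
  const char *err = NULL;
  int ret = ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
                                            filename, &err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRErrorMessage(err);  // no-op when err is NULL
    return ret;
  }
  if (num_parts <= 0) {
    free(headers);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  std::vector<EXRImage> images(static_cast<size_t>(num_parts));
  for (int i = 0; i < num_parts; i++) {
    InitEXRImage(&images[static_cast<size_t>(i)]);
  }
  ret = LoadEXRMultipartImageFromFile(
      &images.at(0), const_cast<const EXRHeader **>(headers),
      static_cast<unsigned int>(num_parts), filename, &err);
  for (int i = 0; i < num_parts; i++) {
    FreeEXRImage(&images[static_cast<size_t>(i)]);
    FreeEXRHeader(headers[i]);
    free(headers[i]);  // each header struct was malloc'ed by the multipart parser
  }
  free(headers);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRErrorMessage(err);
  }
  return ret;
}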
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || components == 3 || components == 4) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most of EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
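// Hedged usage sketch (not part of the tinyexr API): writing a tiny 2x2 RGB
// image with SaveEXR() above. The output filename is a placeholder.
static int ExampleSaveSketch(void) {
  const int w = 2, h = 2, comps = 3;
  float rgb[2 * 2 * 3];
  for (int i = 0; i < w * h; i++) {
    rgb[3 * i + 0] = 1.0f;  // R
    rgb[3 * i + 1] = 0.5f;  // G
    rgb[3 * i + 2] = 0.0f;  // B
  }
  const char *err = NULL;
  int ret = SaveEXR(rgb, w, h, comps, /* save_as_fp16 */ 1, "out.exr", &err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRErrorMessage(err);
  }
  return ret;
}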
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
openmp-ex05.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
int main(void)
{
int orig_num_threads, orig_my_thread;
orig_num_threads = omp_get_num_threads();
orig_my_thread = omp_get_thread_num();
printf ("\"You're all individuals!\" said %d of %d.\n", orig_my_thread, orig_num_threads);
#pragma omp parallel
{
/* The last example showed that variables are shared by default in
* parallel regions: having multiple threads write to the same variable
* creates a race condition.
*
* But, variables declared inside the scope of the parallel region are
* private: each thread has its own private variables */
int my_thread, num_threads;
num_threads = omp_get_num_threads();
my_thread = omp_get_thread_num();
sleep(1);
printf("\"Yes, we're all individuals!\" replied %d of %d, sleepily.\n", my_thread, num_threads);
}
orig_num_threads = omp_get_num_threads();
orig_my_thread = omp_get_thread_num();
printf ("\"I'm not,\" said %d of %d.\n", orig_my_thread, orig_num_threads);
return 0;
}
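/* Hedged sketch (not part of the original example, never called from main):
 * the same privatisation expressed with a data-sharing clause instead of a
 * block-scoped declaration. */
static void private_clause_demo(void)
{
	int my_thread = -1;
	#pragma omp parallel private(my_thread)
	{
		my_thread = omp_get_thread_num();
		printf("\"Clause-private,\" said thread %d.\n", my_thread);
	}
}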
|
blake2sp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
BLAKE2_LOCAL_INLINE(int) blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
blake2s_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store48( P->node_offset, offset );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
BLAKE2_LOCAL_INLINE(int) blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen )
{
blake2s_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store48( P->node_offset, 0ULL );
P->node_depth = 1;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
int blake2sp_init( blake2sp_state *S, const uint8_t outlen )
{
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
int blake2sp_init_key( blake2sp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
int blake2sp_update( blake2sp_state *S, const uint8_t *in, uint64_t inlen )
{
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[id__], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
int blake2sp_final( blake2sp_state *S, uint8_t *out, const uint8_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, outlen );
}
int blake2sp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
if( inlen__ > id__ * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[id__], in__, len );
}
blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
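/* Hedged usage sketch (not part of the reference package): hashing a small
 buffer with the all-in-one blake2sp() above, unkeyed. Never called here. */
static int blake2sp_example_sketch( void )
{
 uint8_t msg[3] = { 'a', 'b', 'c' };
 uint8_t digest[BLAKE2S_OUTBYTES];
 if( blake2sp( digest, msg, NULL, BLAKE2S_OUTBYTES, sizeof( msg ), 0 ) < 0 )
 return -1;
 return 0;
}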
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( int argc, char **argv )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[KAT_LENGTH];
for( size_t i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, buf, key, BLAKE2S_OUTBYTES, i, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
puts( "error" );
return -1;
}
}
puts( "ok" );
return 0;
}
#endif
|
GB_unaryop__minv_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_uint16
// op(A') function: GB_tran__minv_fp32_uint16
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp32_uint16
(
float *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_fp32_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <stdio.h>
#include <omp.h>
int main () {
int numberOfThreads, threadID;
printf("Setting a fixed number of threads. In this case 8\n");
omp_set_num_threads(8);
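  // Note: outside a parallel region omp_get_num_threads() returns 1, so the
  // printf below (and the one after the second region) reports 1, not the
  // value passed to omp_set_num_threads().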
numberOfThreads = omp_get_num_threads();
printf("The total number of threads is %d\n", numberOfThreads);
#pragma omp parallel private(threadID)
{
    // each thread knows its own ID because threadID is private
threadID = omp_get_thread_num();
printf("Hello! my ID is %d\n", threadID);
if (threadID == 0) {
numberOfThreads = omp_get_num_threads();
printf("I am the thread 0 and the total numer is %d\n", numberOfThreads);
}
}
printf("Now we use 5 threads\n");
omp_set_num_threads(5);
numberOfThreads = omp_get_num_threads();
printf("Total number of threads %d\n", numberOfThreads);
  #pragma omp parallel private(threadID)  // private avoids a data race on threadID
{
threadID = omp_get_thread_num();
printf("Hello! my ID is %d\n", threadID);
if (threadID == 0) {
numberOfThreads = omp_get_num_threads();
printf("I am the thread 0 and the total numer is %d\n", numberOfThreads);
}
}
return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;  /* default to 0 when sizes are not given on the command line */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
atomic-1.c | /* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */
/* { dg-options "-O2 -fopenmp -march=pentium" { target i?86-*-* x86_64-*-* } } */
/* { dg-options "-O2 -fopenmp" { target lp64 } } */
#ifdef __i386__
#include "../../../gcc/testsuite/gcc.dg/i386-cpuid.h"
#define bit_CX8 (1 << 8)
#endif
extern void abort (void);
double d;
struct
{
int i;
double e;
int j;
} x;
void
f1 (void)
{
#pragma omp atomic
d += 7.5;
#pragma omp atomic
d *= 2.5;
#pragma omp atomic
d /= 0.25;
}
void
f2 (void)
{
#pragma omp atomic
x.e += 7.5;
#pragma omp atomic
x.e *= 2.5;
#pragma omp atomic
x.e /= 0.25;
}
int
main (void)
{
#ifdef __i386__
unsigned long cpu_facilities;
cpu_facilities = i386_cpuid ();
if ((cpu_facilities & bit_CX8) == 0)
return 0;
#endif
d = 1.0;
f1 ();
if (d != 85.0)
abort ();
x.e = 1.0;
f2 ();
if (x.i != 0 || x.e != 85.0 || x.j != 0)
abort ();
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;  /* default to 0 when sizes are not given on the command line */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;  /* allocated below with Nz entries */
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) {
for (t4=max(max(ceild(t1-1020,1024),ceild(4*t2-Nz-2035,2048)),ceild(4*t3-Ny-2035,2048));t4<=min(min(min(floord(4*Nt+Nx-9,2048),floord(2*t1+Nx-3,2048)),floord(4*t2+Nx-9,2048)),floord(4*t3+Nx-9,2048));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(2048*t4,4*t5+4);
ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
CPULauncher.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#pragma once
#include <cassert>
#include <vector>
#include "open3d/core/AdvancedIndexing.h"
#include "open3d/core/Indexer.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/ParallelUtil.h"
#include "open3d/utility/Console.h"
namespace open3d {
namespace core {
namespace kernel {
class CPULauncher {
public:
/// Fills tensor[:][i] with element_kernel(i).
///
/// \param indexer The input tensor and output tensor to the indexer are the
    /// same (as a hack), since the tensor is filled in-place.
/// \param element_kernel A function that takes pointer location and
/// workload_idx, computes the value to fill, and fills the value at the
/// pointer location.
template <typename func_t>
static void LaunchIndexFillKernel(const Indexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx), workload_idx);
}
}
template <typename func_t>
static void LaunchUnaryEWKernel(const Indexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
template <typename func_t>
static void LaunchBinaryEWKernel(const Indexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
indexer.GetInputPtr(1, workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
template <typename func_t>
static void LaunchAdvancedIndexerKernel(const AdvancedIndexer& indexer,
func_t element_kernel) {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
template <typename scalar_t, typename func_t>
static void LaunchReductionKernelSerial(const Indexer& indexer,
func_t element_kernel) {
for (int64_t workload_idx = 0; workload_idx < indexer.NumWorkloads();
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
indexer.GetOutputPtr(workload_idx));
}
}
/// Create num_threads workers to compute partial reductions and then reduce
    /// to the final results. This only applies to reduction ops with a single output.
template <typename scalar_t, typename func_t>
static void LaunchReductionKernelTwoPass(const Indexer& indexer,
func_t element_kernel,
scalar_t identity) {
if (indexer.NumOutputElements() > 1) {
utility::LogError(
"Internal error: two-pass reduction only works for "
"single-output reduction ops.");
}
int64_t num_workloads = indexer.NumWorkloads();
int64_t num_threads = GetMaxThreads();
int64_t workload_per_thread =
(num_workloads + num_threads - 1) / num_threads;
std::vector<scalar_t> thread_results(num_threads, identity);
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
int64_t start = thread_idx * workload_per_thread;
int64_t end = std::min(start + workload_per_thread, num_workloads);
for (int64_t workload_idx = start; workload_idx < end;
++workload_idx) {
element_kernel(indexer.GetInputPtr(0, workload_idx),
&thread_results[thread_idx]);
}
}
void* output_ptr = indexer.GetOutputPtr(0);
for (int64_t thread_idx = 0; thread_idx < num_threads; ++thread_idx) {
element_kernel(&thread_results[thread_idx], output_ptr);
}
}
template <typename scalar_t, typename func_t>
static void LaunchReductionParallelDim(const Indexer& indexer,
func_t element_kernel) {
// Prefers outer dimension >= num_threads.
const int64_t* indexer_shape = indexer.GetMasterShape();
const int64_t num_dims = indexer.NumDims();
int64_t num_threads = GetMaxThreads();
// Init best_dim as the outer-most non-reduction dim.
int64_t best_dim = num_dims - 1;
while (best_dim >= 0 && indexer.IsReductionDim(best_dim)) {
best_dim--;
}
for (int64_t dim = best_dim; dim >= 0 && !indexer.IsReductionDim(dim);
--dim) {
if (indexer_shape[dim] >= num_threads) {
best_dim = dim;
break;
} else if (indexer_shape[dim] > indexer_shape[best_dim]) {
best_dim = dim;
}
}
if (best_dim == -1) {
utility::LogError(
"Internal error: all dims are reduction dims, use "
"LaunchReductionKernelTwoPass instead.");
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for (int64_t i = 0; i < indexer_shape[best_dim]; ++i) {
Indexer sub_indexer(indexer);
sub_indexer.ShrinkDim(best_dim, i, 1);
LaunchReductionKernelSerial<scalar_t>(sub_indexer, element_kernel);
}
}
};
} // namespace kernel
} // namespace core
} // namespace open3d
|
drhook.c | /**
* (C) Copyright 2014- ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
*
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation
* nor does it submit to any jurisdiction.
*/
#define _DRHOOK_C_ 1
#define _GNU_SOURCE
/*
drhook.c
Author: Sami Saarinen, ECMWF, 14..24-Nov-2003
Thanks to Bob Walkup & John Hague for IBM Power4 version
Thanks to Bob Carruthers for Cray X1 (SV2), XD1 and XT3 versions,
as well as David Tanqueray for the flop routines
Also thanks to Roland Richter for suggesting the use
of "call tracebackqq()" function.
In our environment this is accomplished by calling fortran
routine intel_trbk() from ifsaux/utilities/gentrbk.F90.
*/
/*
If intending to run on IBM P4+ or newer systems the following definition
should be activated to use pm_initialize() instead of pm_init() of PMAPI-lib ($LIBHPM)
#define PMAPI_POST_P4
*/
/*
If *ALSO* intending to run on IBM P5+ systems, then set also BOTH
#define PMAPI_POST_P4
#define PMAPI_P5_PLUS
*/
/* Thanks to John Hague (IBM)
If intending to run on IBM p6 systems, then set also BOTH
#define PMAPI_POST_P4
#define PMAPI_P6
*/
#if defined(PMAPI_P7)
#define ENTRY_4 5
#define ENTRY_6 4
#elif defined(PMAPI_P6)
#define ENTRY_4 5
#define ENTRY_6 4
#elif defined(PMAPI_P5_PLUS)
#define ENTRY_4 5
#define ENTRY_6 4
#else
#define ENTRY_4 4
#define ENTRY_6 6
#endif
#if defined(SV2) || defined(XD1) || defined(XT3)
#define DT_FLOP
#define HPM
#define MAX_COUNTERS 6
#endif
#ifdef RS6K
#pragma options opt=3 halt=e
#include <pthread.h>
#endif
#include <unistd.h>
#if defined(DARWIN)
#include <pthread.h>
#endif
#define EC_HOST_NAME_MAX 512
/* === This doesn't handle recursive calls correctly (yet) === */
#include "drhook.h"
#include "cas.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#ifdef __USE_GNU
#include <dlfcn.h>
#endif
static void set_timed_kill();
static void process_options();
static char *TimeStr(char *s, int slen);
int drhook_memtrace = 0; /* set to 1, if opt_memprof or opt_timeline ; used in getcurheap.c to lock stuff */
#if !defined(CACHELINESIZE)
#if defined(LEVEL1_DCACHE_LINESIZE)
#define CACHELINESIZE LEVEL1_DCACHE_LINESIZE
#else
/* ***Note: A hardcoded cache line size in bytes !!! */
#ifdef RS6K
#define CACHELINESIZE 128
#else
#define CACHELINESIZE 64
#endif
#endif
#endif
#include "crc.h"
#include <time.h>
static char *start_stamp = NULL;
static char *end_stamp = NULL;
static int numthreads = 0;
static int myproc = 1;
static int nproc = -1;
static int max_threads = 1;
extern int get_thread_id_();
typedef struct drhook_prefix_t {
char s[3840];
char timestr[256];
int nsigs;
} drhook_prefix_t;
static drhook_prefix_t *ec_drhook = NULL;
static int timestr_len = 0;
#define PREFIX(tid) (ec_drhook && tid >= 1 && tid <= numthreads) ? ec_drhook[tid-1].s : ""
#define TIDNSIGS(tid) (ec_drhook && tid >= 1 && tid <= numthreads) ? ec_drhook[tid-1].nsigs : -1
#define TIMESTR(tid) (timestr_len > 0 && ec_drhook && tid >= 1 && tid <= numthreads) ? TimeStr(ec_drhook[tid-1].timestr,timestr_len) : ""
#define FFL __FUNCTION__,__FILE__,__LINE__
static int drhook_trapfpe_master_init = 0;
static int drhook_trapfpe = 1;
static int drhook_trapfpe_invalid = 1;
static int drhook_trapfpe_divbyzero = 1;
static int drhook_trapfpe_overflow = 1;
#if defined(NECSX)
#pragma cdir options -Nv -Csopt
extern void necsx_trbk_(const char *msg, int msglen); /* from ../utilities/gentrbk.F90 */
#endif
#if defined(LINUX) && !defined(XT3) && !defined(XD1) && !defined(CYGWIN)
#if defined(__GNUC__) && !defined(NO_TRAPFPE)
#if defined(CYGWIN)
#include <mingw/fenv.h>
#else
#include <fenv.h>
#endif
extern int feenableexcept(int excepts);
extern int fedisableexcept(int excepts);
extern int fegetexcept(void);
#if defined(DARWIN)
/* A temporary fix to link on MacIntosh. Something more clever will be done later -REK. */
int feenableexcept (int excepts) { return 0; }
int fedisableexcept(int excepts) { return 0; }
int fegetexcept(void) { return 0; }
#endif
#if defined(__NEC__)
int fegetexcept(void) { return 0; }
#endif
static void trapfpe(int silent)
{
/* Enable some exceptions. At startup all exceptions are masked. */
#if 1
/* New coding -- honours DR_HOOK_TRAPFPE_{INVALID,DIVBYZERO,OVERLOW} set to 1 (or 0) */
int tid = get_thread_id_();
int enable = 0;
int disable = 0;
int dummy;
int rc_enable = 0;
int rc_disable = 0;
int excepts_before, excepts_after;
dummy = drhook_trapfpe_invalid ? (enable |= FE_INVALID) : (disable |= FE_INVALID);
dummy = drhook_trapfpe_divbyzero ? (enable |= FE_DIVBYZERO) : (disable |= FE_DIVBYZERO);
dummy = drhook_trapfpe_overflow ? (enable |= FE_OVERFLOW) : (disable |= FE_OVERFLOW);
if (!silent && myproc == 1) {
excepts_before = fegetexcept();
}
if (enable) rc_enable = feenableexcept(enable); // Turn ON these
if (disable) rc_disable = fedisableexcept(disable); // Turn OFF these
if (!silent && myproc == 1) {
char *pfx = PREFIX(tid);
excepts_after = fegetexcept();
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK trapfpe() : Exceptions before = 0x%x [%d] -- after = 0x%x [%d]\n",
pfx,TIMESTR(tid),FFL,
excepts_before, excepts_before,
excepts_after, excepts_after);
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK trapfpe() : with FE_INVALID = 0x%x [%d] -- FE_DIVBYZERO = 0x%x [%d] -- FE_OVERFLOW = 0x%x [%d]\n",
pfx,TIMESTR(tid),FFL,
(int)FE_INVALID, (int)FE_INVALID,
(int)FE_DIVBYZERO, (int)FE_DIVBYZERO,
(int)FE_OVERFLOW, (int)FE_OVERFLOW);
if (enable) {
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK trapfpe() : feenableexcept(0x%x [%d]) returns rc=%d\n",
pfx,TIMESTR(tid),FFL,
enable,enable,rc_enable);
}
if (disable) {
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK trapfpe() : fedisableexcept(0x%x [%d]) returns rc=%d\n",
pfx,TIMESTR(tid),FFL,
disable,disable,rc_disable);
}
if (tid == 1) drhook_trapfpe_master_init = 1; // go-ahead for slave threads in trapfpe_slave_threads()
}
#else
#if defined(PARKIND1_SINGLE) && !defined(SGEMM)
/* For now ... we have issues in SGEMM with IEEE-invalid ... especially with LIBSCI from Cray */
int rc = feenableexcept(FE_DIVBYZERO|FE_OVERFLOW);
#else
int rc = feenableexcept(FE_INVALID|FE_DIVBYZERO|FE_OVERFLOW);
#endif
#endif
}
static void untrapfpe(int silent)
{
/* Disable some exceptions. At startup all exceptions are masked. */
int rc = fedisableexcept(FE_INVALID|FE_DIVBYZERO|FE_OVERFLOW);
}
#endif /* defined(__GNUC__) */
#endif /* defined(LINUX) && !defined(XT3) && !defined(XD1) */
#if (!defined(LINUX) || defined(CYGWIN) || defined(NO_TRAPFPE)) && defined(__GNUC__)
/* For example Solaris with gcc */
#define trapfpe(x)
#define untrapfpe(x)
#endif
#ifndef drhook_harakiri_timeout_default
#define drhook_harakiri_timeout_default 500
#endif
static int drhook_harakiri_timeout = drhook_harakiri_timeout_default;
static int drhook_use_lockfile = 1;
static int atp_enabled = 0; /* Cray ATP specific */
static int atp_max_cores = 20; /* Cray ATP specific */
static int atp_max_analysis_time = 300; /* Cray ATP specific */
static int atp_ignore_sigterm = 0; /* Cray ATP specific */
static int any_memstat = 0;
static int opt_gethwm = 0;
static int opt_getstk = 0;
static int opt_getrss = 0;
static int opt_getpag = 0;
static int opt_walltime = 0;
static int opt_cputime = 0;
static int opt_wallprof = 0;
static int opt_cpuprof = 0;
static int opt_hpmprof = 0;
static int opt_memprof = 0;
static int opt_trim = 0;
static int opt_calls = 0;
static int opt_self = 1; /* 0=exclude drhook altogether,
1=include, but don't print,
2=also print */
static int opt_propagate_signals = 1;
static int opt_sizeinfo = 1;
static int opt_clusterinfo = 0;
static int opt_callpath = 0;
#define callpath_indent_default 2
static int callpath_indent = callpath_indent_default;
#define callpath_depth_default 50
static int callpath_depth = callpath_depth_default;
static int callpath_packed = 0;
static int opt_calltrace = 0;
static int opt_funcenter = 0;
static int opt_funcexit = 0;
static int opt_timeline = 0; /* myproc or -1 [or 0 for --> timeline feature off (default)] */
static int opt_timeline_thread = 1; /* thread-id control :
<= 0 print for all threads
1 -> #1 only [but curheap still SUM of all threads] (default),
n -> print for increasing number of threads separately : [1..n] */
static int opt_timeline_format = 1; /* if 1, print only {wall,hwm,rss,curheap} w/o labels "wall=" etc.; else fully expanded fmt */
static int opt_timeline_unitno = 6; /* Fortran unit number : default = 6 i.e. stdout */
static long long int opt_timeline_freq = 1000000; /* How often to print : every n-th call : default = every 10^6 th call or ... */
static double opt_timeline_MB = 1.0; /* ... rss or curheap jumps up/down by more than this many MBytes (default = 1) : unit MBytes */
static volatile sig_atomic_t opt_gencore = 0;
static int opt_gencore_signal = 0;
static int hpm_grp = 0;
static int opt_random_memstat = 0; /* > 0 to obtain random memory stats (maxhwm, maxstk) for tid=1. Updated when rand() % opt_random_memstat == 0 */
static double opt_trace_stack = 0; /* if > 0, a multiplier for OMP_STACKSIZE to monitor high master thread stack usage --
-- implies opt_random_memstat = 1 (regardless of DR_HOOK_RANDOM_MEMSTAT setting)
-- for master MPI task only (for the moment) */
static long long int drhook_omp_stacksize = 0; /* Slave stack size --
an indicative stack size that even the master thread should not exceed */
static long long int drhook_stacksize_threshold = 0;
static long long int slave_stacksize();
/* Begin of developer options */
static char *drhook_timed_kill = NULL; /* Timer assisted simulated kill of procs/threads by signal */
static int drhook_dump_maps = 0; /* Print /proc/<tid>/maps from signal handler (before moving to ATP or below) */
static int drhook_dump_smaps = 0; /* Print /proc/<tid>/smaps from signal handler (before moving to ATP or below) */
static int drhook_dump_buddyinfo = 0; /* Print /proc/buddyinfo from signal handler (before moving to ATP or below) */
static int drhook_dump_meminfo = 0; /* Print /proc/meminfo from signal handler (before moving to ATP or below) */
static int drhook_dump_hugepages = 0;
static double drhook_dump_hugepages_freq = 0;
/* End of developer options */
typedef struct drhook_timeline_t {
unsigned long long int calls[2]; /* 0=drhook_begin , 1=drhook_end */
double last_curheap_MB;
double last_rss_MB;
double last_stack_MB;
double last_vmpeak_MB;
//#if CACHELINESIZE > (2*sizeof(unsigned long long int) + 4*sizeof(double)) -- disallowed
#if CACHELINESIZE > (2*8 + 4*8)
char pad[CACHELINESIZE - (2*sizeof(unsigned long long int) + 4*sizeof(double))]; /* padding : e.g. 64 bytes - 6*8 bytes */
#endif
} drhook_timeline_t; /* cachelinesize optimized --> less false sharing when running with OpenMP */
static drhook_timeline_t *timeline = NULL;
/* HPM-specific */
static long long int opt_hpmstop_threshold = -1;
static double opt_hpmstop_mflops = 1000000.0; /* Yes, 1 PetaFlop/s !! */
#define DRHOOK_STRBUF 1000
#ifndef SA_SIGINFO
#define SA_SIGINFO 0
#define SIG_EXTRA_ARGS /* empty */
#define SIG_PASS_EXTRA_ARGS /* empty */
#else
#define SIG_EXTRA_ARGS , siginfo_t *sigcode, void *sigcontextptr
#define SIG_PASS_EXTRA_ARGS , sigcode, sigcontextptr
#endif
#define NIL "(nil)"
#undef MIN
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
#undef MAX
#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
#undef ABS
#define ABS(x) ( (x) >= 0 ? (x) : -(x) )
#define strequ(s1,s2) ((void *)s1 && (void *)s2 && strcmp(s1,s2) == 0)
#define strnequ(s1,s2,n) ((void *)s1 && (void *)s2 && memcmp(s1,s2,n) == 0)
extern long long int getstk_();
extern long long int getmaxstk_();
extern long long int gethwm_();
extern long long int getmaxhwm_();
extern long long int getrss_();
extern long long int getmaxrss_();
extern long long int getcurheap_();
extern long long int getmaxcurheap_();
extern long long int getcurheap_thread_(const int *tidnum); /* *tidnum >= 1 && <= max_threads */
extern long long int getmaxcurheap_thread_(const int *tidnum); /* *tidnum >= 1 && <= max_threads */
extern long long int getpag_();
extern long long int getvmpeak_();
extern void ec_set_umask_();
#if defined(DT_FLOP)
extern double flop_();
#endif
extern double util_cputime_();
extern double util_walltime_();
#ifdef RS6K
static long long int irtc_start = 0;
extern long long int irtc();
#define WALLTIME() ((double)(irtc() - irtc_start)*1.0e-9)
#define CPUTIME() util_cputime_()
#elif defined(CRAYXT)
/* Cray XT3/XT4 with catamount microkernel */
#include <catamount/dclock.h>
static double dclock_start = 0;
#define WALLTIME() (dclock() - dclock_start)
#define CPUTIME() WALLTIME()
#else
#if defined(SV2)
#include <intrinsics.h>
#endif
#if defined(XD1) || defined(XT3)
extern long long int irtc_(); /* integer*8 irtc() */
extern long long int irtc_rate_(); /* integer*8 irtc_rate() */
#endif
#if defined(SV2) || defined(XD1) || defined(XT3)
static long long int irtc_start = 0;
static double my_irtc_rate = 0;
static double my_inv_irtc_rate = 0;
#if defined(SV2)
#define WALLTIME() ((double)(_rtc() - irtc_start)*my_inv_irtc_rate)
#else
#define WALLTIME() ((double)(irtc_() - irtc_start)*my_inv_irtc_rate)
#endif
#define CPUTIME() util_cputime_()
#else
#define WALLTIME() util_walltime_()
#define CPUTIME() util_cputime_()
#endif
#endif
/* #define RAISE(x) { int tmp = x; c_drhook_raise_(&tmp); } */
#include "raise.h"
#include "cargs.h"
extern void LinuxTraceBack(const char *prefix, const char *timestr, void *sigcontextptr);
/*** typedefs ***/
typedef union {
struct drhook_key_t *keyptr;
double d;
unsigned long long int ull;
} equivalence_t;
typedef struct drhook_key_t {
char *name;
unsigned short name_len;
const equivalence_t *callpath; /* parent's tree down to callpath_depth */
int callpath_len;
unsigned int callpath_fullhash;
unsigned short status; /* 0=inactive, >1 active */
unsigned long long int calls;
long long int hwm, maxrss, rssnow, stack, maxstack, paging;
double wall_in, delta_wall_all, delta_wall_child;
double cpu_in, delta_cpu_all, delta_cpu_child;
#ifdef HPM
unsigned char hpm_stopped, counter_stopped;
double this_delta_wall_child;
double avg_mipsrate, avg_mflops;
unsigned long long int hpm_calls;
double mip_count_in, mflop_count_in;
long long int *counter_in, *counter_sum;
#endif
char *filename; /* the filename where the 1st call (on this routine-name)
to dr_hook() occurred */
long long int sizeinfo; /* # of data elements, bytes, etc. */
long long int min_sizeinfo, max_sizeinfo; /* min & max of # of data elements, bytes, etc. */
/* memprof specific */
long long int mem_seenmax;
long long int mem_child, mem_curdelta;
long long int maxmem_selfdelta, maxmem_alldelta;
long long int mem_maxhwm, mem_maxrss, mem_maxstk, mem_maxpagdelta;
long long int paging_in;
unsigned long long int alloc_count, free_count;
struct drhook_key_t *next;
} drhook_key_t;
typedef struct drhook_calltree_t {
int active;
drhook_key_t *keyptr;
struct drhook_calltree_t *next;
struct drhook_calltree_t *prev;
} drhook_calltree_t;
typedef struct drhook_sig_t {
char name[32];
struct sigaction new;
struct sigaction old;
int active;
int ignore_atexit;
} drhook_sig_t;
typedef union {
void (*func1args)(int sig);
void (*func3args)(int sig SIG_EXTRA_ARGS);
} drhook_sigfunc_t;
typedef struct drhook_prof_t {
double pc;
double total;
double self;
unsigned long long int calls;
double percall_ms_self;
double percall_ms_total;
double mipsrate, mflops, divpc;
int index;
int tid;
int cluster;
double *maxval;
unsigned char is_max;
char *name;
char *filename;
long long int sizeinfo;
long long int min_sizeinfo, max_sizeinfo;
double sizespeed, sizeavg;
const equivalence_t *callpath; /* parent's tree down to callpath_depth */
int callpath_len;
} drhook_prof_t;
typedef struct drhook_memprof_t {
double pc;
long long int self;
long long int children;
long long int hwm, rss, stk, pag, leaked;
unsigned long long int calls, alloc_count, free_count;
int index;
int tid;
int cluster;
long long int *maxval;
unsigned char is_max;
char *name;
char *filename;
const equivalence_t *callpath; /* parent's tree down to callpath_depth */
int callpath_len;
} drhook_memprof_t;
#define MAX_WATCH_FIRST_NBYTES 8
typedef struct drhook_watch_t {
char *name;
int tid;
int active;
int abort_if_changed;
const char *ptr;
int nbytes;
int watch_first_nbytes;
char first_nbytes[MAX_WATCH_FIRST_NBYTES];
unsigned int crc32;
int printkey;
int nvals;
struct drhook_watch_t *next;
} drhook_watch_t;
/*** static (local) variables ***/
static o_lock_t DRHOOK_lock = 0;
static pid_t pid = -1;
static drhook_key_t **keydata = NULL;
static drhook_calltree_t **calltree = NULL;
static drhook_calltree_t **thiscall = NULL;
static int signals_set = 0;
static volatile sig_atomic_t signal_handler_called = 0;
static volatile sig_atomic_t signal_handler_ignore_atexit = 0;
static volatile sig_atomic_t unlimited_corefile_retcode = 9999;
static volatile unsigned long long int saved_corefile_hardlimit = 0;
static int allow_coredump = -1; /* -1 denotes ALL MPI-tasks, 1..NPES == myproc, 0 = coredump will not be enabled by DrHook at init */
static drhook_sig_t siglist[1+NSIG] = { 0 };
static char *a_out = NULL;
static char *mon_out = NULL;
static int mon_out_procs = -1;
static double percent_limit = -10; /* Lowest percentage accepted into the printouts */
static drhook_key_t **keyself = NULL; /* pointers to itself (per thread) */
static double *overhead; /* Total Dr.Hook-overhead for every thread in either WALL or CPU secs */
static drhook_key_t **curkeyptr = NULL; /* pointers to current keyptr (per thread) */
static drhook_watch_t *watch = NULL;
static drhook_watch_t *last_watch = NULL;
static int watch_count = 0; /* No. of *active* watch points */
#ifndef SYS_gettid
#define SYS_gettid __NR_gettid
#endif
static pid_t gettid() {
#if defined(DARWIN)
uint64_t tid64;
pthread_threadid_np(NULL, &tid64);
pid_t tid = (pid_t)tid64;
#else
pid_t tid = syscall(SYS_gettid);
#endif
return tid;
}
// Fortran callable : CALL GETTID_C(ITID) where INTEGER(KIND=4) :: ITID
void gettid_c_(int *tid)
{
if (tid) *tid = (int)gettid();
}
void gettid_c(int *tid) { gettid_c_(tid); }
static void set_ec_drhook_label(const char *hostname, int hlen)
{
int tid = get_thread_id_();
int j = tid - 1;
int slen = sizeof(ec_drhook[j].s);
pid_t unixtid = gettid();
snprintf(ec_drhook[j].s,slen,"[EC_DRHOOK:%*s:%d:%d:%lld:%lld]",
hlen,hostname,myproc,tid,
(long long int)pid, (long long int)unixtid);
}
#define SECS(x) ((int)(x))
#define NSECS(x) ((int)(1000000000 * ((x) - SECS(x))))
#ifndef __timer_t_defined
static void set_killer_timer(const int *ntids, const int *target_omptid,
const int *target_sig, const double *start_time,
const char *p, int lenp)
{
// timer_t, timer_create and timer_settime are a POSIX extension,
// not available on e.g. Darwin -- hence this stub does nothing
}
#else
static void set_killer_timer(const int *ntids, const int *target_omptid,
const int *target_sig, const double *start_time,
const char *p, int lenp)
{
static volatile sig_atomic_t TimedKill = 0;
if (ntids && target_omptid && target_sig && start_time && p) {
int tid = get_thread_id_();
if (*target_omptid == -1 || *target_omptid == tid) {
char *pfx = PREFIX(tid);
timer_t timerid = { 0 };
struct itimerspec its = { 0 } ;
struct sigevent sev = { 0 } ;
sev.sigev_signo = *target_sig;
#if defined(SIGEV_THREAD_ID)
sev.sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL;
/* sev.sigev_notify_thread_id = gettid(); */
sev._sigev_un._tid = gettid();
#elif defined(SIGEV_THREAD)
sev.sigev_notify = SIGEV_THREAD | SIGEV_SIGNAL;
#else
sev.sigev_notify = SIGEV_SIGNAL;
#endif
sev.sigev_value.sival_ptr = &timerid;
its.it_value.tv_sec = SECS(*start_time);
its.it_value.tv_nsec = NSECS(*start_time);
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = 0;
timer_create(CLOCK_MONOTONIC, &sev, &timerid);
/* timer_create(CLOCK_REALTIME, &sev, &timerid); */
timer_settime(timerid, 0, &its, NULL);
cas_lock(&TimedKill);
{
fprintf(stderr,
"%s %s [%s@%s:%d] Developer timer (%s) expires"
" after %.3fs through signal#%d (ntids=%d)\n",
pfx,TIMESTR(tid),FFL,
p,
*start_time, *target_sig, *ntids);
fflush(NULL);
}
cas_unlock(&TimedKill);
} /* if (target_omptid == -1 || target_omptid == tid) */
}
}
#endif
#if !defined(NCALLSTACK)
#ifdef PARKIND1_SINGLE
/* > 0 : USE call stack approach : needed for single precision version */
#define NCALLSTACK 64
#else
/* == 0 : do NOT use call stack approach : usually for double precision version */
#define NCALLSTACK 0
#endif
#endif
static int cstklen = NCALLSTACK;
#define HASHSIZE(n) ((unsigned int)1<<(n))
#define HASHMASK(n) (HASHSIZE(n)-1)
#define NHASH 16
#define NHASHMAX 24
static int nhash = NHASH;
static unsigned int hashsize = HASHSIZE(NHASH);
static unsigned int hashmask = HASHMASK(NHASH);
#ifdef HPM
/* HPM-specific (static) protos */
static void stopstart_hpm(int tid, drhook_key_t *pstop, drhook_key_t *pstart);
static void stop_only_hpm(int tid, drhook_key_t *pstop);
static void init_hpm(int tid);
static double mflops_hpm(const drhook_key_t *keyptr);
static double mips_hpm(const drhook_key_t *keyptr);
static double divpc_hpm(const drhook_key_t *keyptr);
static double mflop_count(const drhook_key_t *keyptr);
static double mip_count(const drhook_key_t *keyptr);
#else
/* Dummies for HPM as macros that do nothing */
#define stopstart_hpm(tid, pstop, pstart)
#define stop_only_hpm(tid, pstop)
#define init_hpm(tid)
#define mflops_hpm(keyptr) 0
#define mips_hpm(keyptr) 0
#define divpc_hpm(keyptr) 0
#define mflop_count(keyptr) 0
#define mip_count(keyptr) 0
#endif
/*--- spin ---*/
static int nanospin(int secs, int nanosecs) {
struct timespec req, rem;
req.tv_sec = secs;
req.tv_nsec = nanosecs;
return nanosleep(&req, &rem);
}
static int spin(int secs) {
return nanospin(secs, 0);
}
/*--- dump_file ---*/
static void dump_file(const char *pfx, int tid, int sig, int nsigs, const char filename[])
{
/* Developer option: Will this spoil our ATP trace ... ? */
FILE *fp;
char in[256];
char *tst = TIMESTR(tid);
if (sig > 0 && nsigs >= 1) {
fprintf(stderr,
"%s %s [%s@%s:%d] Content of the file '%s', signal#%d, nsigs = %d\n",
pfx,tst,FFL,filename,sig,nsigs);
}
else {
fprintf(stderr,
"%s %s [%s@%s:%d] Content of the file '%s'\n",
pfx,tst,FFL,filename);
}
fp = fopen(filename,"r");
if (fp) {
while (fgets(in,sizeof(in),fp) == in) {
fprintf(stderr,"%s %s [%s@%s:%d] %s",pfx,tst,FFL,in);
/* fprintf(stderr,"%s",in); */
}
fclose(fp);
}
}
/*--- dump_hugepages ---*/
// Forward declaration of subroutine in ec_meminfo.F90
void ec_meminfo_( const int* ku,
const char* cdstring,
const int* kcomm,
const int* kbarr,
const int* kiotask,
const int* kcall,
int cdstring_strlen );
static void dump_hugepages(int enforce, const char *pfx, int tid, int sig, int nsigs)
{
if (enforce || drhook_dump_hugepages) {
if (enforce || tid == 1) { /* OML-thread id >= 1 */
static double next_scheduled = -1;
double wt = WALLTIME();
if (enforce || wt > next_scheduled) {
const int kcomm = -1;
const int kbarr = 0;
const int kiotask = 0;
const int kcall = -1;
const int ftnunitno = 0; /* stderr */
fflush(NULL);
ec_meminfo_(&ftnunitno,pfx,&kcomm,&kbarr,&kiotask,&kcall,strlen(pfx));
fflush(NULL);
if (drhook_dump_buddyinfo) {
dump_file(pfx,tid,sig,nsigs,"/proc/buddyinfo");
}
if (drhook_dump_meminfo) {
dump_file(pfx,tid,sig,nsigs,"/proc/meminfo");
}
wt = WALLTIME();
next_scheduled = wt + drhook_dump_hugepages_freq;
}
}
}
}
/*--- set_default_handler ---*/
static int set_unlimited_corefile(unsigned long long int *hardlimit);
static int set_default_handler(int sig, int unlimited_corefile, int verbose)
{
int rc = -2;
if (sig >= 1 && sig <= NSIG) {
unsigned long long int hardlimit = 0;
struct sigaction sa = { 0 };
sa.sa_handler = SIG_DFL;
sigemptyset(&sa.sa_mask);
/*
sigfillset(&sa.sa_mask); -- if we wanted to block all (catchable) signals whilst in subsequent signal handler SIG_DFL
sigaddset(&sa.sa_mask, some_signal_to_be_blocked); ... just in case
*/
sigaction(sig, &sa, NULL);
if (unlimited_corefile) rc = set_unlimited_corefile(&hardlimit); /* unconditionally */
if (verbose) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
char buf[128] = "";
if (unlimited_corefile && rc == 0) snprintf(buf,sizeof(buf)," -- hardlimit for core file is now %llu (0x%llx)", hardlimit, hardlimit);
fprintf(stderr,
"%s %s [%s@%s:%d] "
"Enabled default signal handler (SIG_DFL) for signal#%d%s\n",
pfx,TIMESTR(tid),FFL,
sig,buf);
}
}
return rc;
}
/*--- malloc_drhook ---*/
static void *
malloc_drhook(size_t size)
{
size_t size1 = MAX(1,size);
void *p = malloc(size1);
if (!p) {
fprintf(stderr,
"***Error in malloc_drhook(): Unable to allocate space for %lld bytes\n",
(long long int)size1);
RAISE(SIGABRT);
}
return p;
}
/*--- calloc_drhook ---*/
static void *
calloc_drhook(size_t nmemb, size_t size)
{
size_t n = nmemb * size;
void *p = malloc_drhook(n);
memset(p,0,n);
return p;
}
/*--- free_drhook ---*/
#define free_drhook(x) { if (x) { free(x); x = NULL; } }
/*--- callstack ---*/
/* Note: For single precision calls -- small performance penalty */
typedef struct callstack_t {
drhook_key_t **keyptr;
unsigned int next;
unsigned int maxdepth;
} callstack_t;
static callstack_t **cstk = NULL;
static drhook_key_t *callstack(int tid, void *key, drhook_key_t *keyptr)
{
/* Single routine -- two usages:
(1) Upon c_drhook_start_() we call:
(void) callstack(tid, key, u.keyptr);
- store keyptr into thread specific call stack
- fill *key with a 4-byte index giving the position in the aforementioned call stack
(2) Upon c_drhook_end_() we call:
u.keyptr = callstack(tid, (void *)key, NULL);
- pass 4-byte index in
- obtain keyptr from call stack
- decrement call stack
*/
static const unsigned int inc = 64;
unsigned int idx, *Index = key;
callstack_t *c = cstk[tid-1];
if (keyptr) {
if (!c) {
cstk[tid-1] = c = calloc_drhook(1, sizeof(*c));
c->keyptr = (drhook_key_t **) calloc_drhook(cstklen, sizeof(drhook_key_t *));
c->next = 0;
c->maxdepth = cstklen;
}
idx = (c->next)++;
if (idx >= c->maxdepth) {
drhook_key_t **kptr;
unsigned int maxdepth = idx + inc;
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] "
"Call stack index %u out of range [0,%u) : extending the range to [0,%u) for this thread\n",
pfx,TIMESTR(tid),FFL,
idx,c->maxdepth,maxdepth);
kptr = (drhook_key_t **) calloc_drhook(maxdepth, sizeof(drhook_key_t *));
memcpy(kptr,c->keyptr,c->maxdepth * sizeof(drhook_key_t *));
free_drhook(c->keyptr);
c->keyptr = kptr;
c->maxdepth = maxdepth;
}
if (idx >= c->maxdepth) {
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] "
"Call stack index %u still out of range [0,%u). Aborting ...\n",
pfx,TIMESTR(tid),FFL,
idx,c->maxdepth);
RAISE(SIGABRT);
}
c->keyptr[idx] = keyptr;
*Index = idx;
}
else {
idx = --(c->next);
if (idx != *Index) {
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] "
"Invalid index to call stack %u : out of range [0,%u). Expecting the exact value of %u\n",
pfx,TIMESTR(tid),FFL,
idx,c->maxdepth,*Index);
RAISE(SIGABRT);
}
keyptr = c->keyptr[idx];
}
return keyptr;
}
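#if 0
/* Hedged usage sketch mirroring the comment at the top of callstack() -- illustrative only and
   excluded from compilation; lookup_key() is a hypothetical helper, not part of this file */
{
  int tid = get_thread_id_();
  double key = 0; /* caller-provided 8-byte slot */
  equivalence_t u;
  u.keyptr = lookup_key(); /* hypothetical: obtain the routine's key pointer */
  (void) callstack(tid, &key, u.keyptr); /* entry: push keyptr, store the stack index in key */
  /* ... instrumented routine runs here ... */
  u.keyptr = callstack(tid, &key, NULL); /* exit: pop; the index in key must match the top */
}
#endif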
/*--- strdup_drhook ---*/
static char *
strdup_drhook(const char *s)
{
int n = strlen(s);
char *p = malloc_drhook(n+1);
memcpy(p,s,n);
p[n] = 0;
return p;
}
/*--- strdup2_drhook ---*/
static char *
strdup2_drhook(const char *s, int s_len)
{
int n = s_len;
char *p = malloc_drhook(n+1);
memcpy(p,s,n);
p[n] = 0;
return p;
}
/*--- timestamp ---*/
static char *
timestamp()
{
time_t tp;
const int bufsize = 64;
char *buf = malloc_drhook(bufsize+1);
time(&tp);
strftime(buf, bufsize, "%Y%m%d %H%M%S", localtime(&tp));
return buf;
}
/*--- TimeStr ---*/
static char *
TimeStr(char *s, int slen)
{
if (s) {
time_t tp;
char buf[64];
time(&tp);
strftime(buf, sizeof(buf), "%Y%m%d:%H%M%S", localtime(&tp));
snprintf(s,slen,"[%s:%lld:%.3f]",buf,(long long int)tp,WALLTIME());
}
return s;
}
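/* For illustration (format inferred from the snprintf above): TimeStr() yields something like
   "[20240131:101500:1706692500:12.345]" i.e. date:time, epoch seconds and elapsed wall time;
   the numbers shown here are made up. */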
/* -- These 2 extern's are called primarily from LinuxTrbk() */
const char *drhook_TIMESTR(int tid)
{
static const char fixed[] = "";
if (tid <= 0) coml_my_thread_(&tid);
{
char *s = TIMESTR(tid);
return strlen(s) > 0 ? (const char *)s : fixed;
}
}
const char *drhook_PREFIX(int tid)
{
static const char fixed[] = "";
if (tid <= 0) coml_my_thread_(&tid);
{
char *s = PREFIX(tid);
return strlen(s) > 0 ? (const char *)s : fixed;
}
}
/*--- hashfunc ---*/
unsigned int
hashfunc(const char *s, int s_len)
{
unsigned int hashval;
if (opt_trim) {
for (hashval = 0; s_len>0 ; s++, s_len--) {
unsigned char c = islower(*s) ? toupper(*s) : *s;
hashval = (hashval<<4)^(hashval>>28)^(c);
}
}
else {
for (hashval = s_len; s_len>0 ; s_len--) {
hashval = (hashval<<4)^(hashval>>28)^(*s++);
}
}
hashval = (hashval ^ (hashval>>10) ^ (hashval>>20)) & hashmask;
return hashval;
}
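/* Illustrative note (not part of the original source): the returned value is already reduced with
   hashmask, so it lies in [0, hashsize) and can be used directly as an index into the hash table.
   With opt_trim set, "SubName" and "SUBNAME" hash identically since characters are upcased first. */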
/*--- callpath_hashfunc ---*/
unsigned int
callpath_hashfunc(unsigned int inithash, /* from hashfunc() */
const equivalence_t *callpath, int callpath_len,
unsigned int *fullhash)
{
unsigned int hashval;
for (hashval = inithash; callpath_len>0 ; callpath++, callpath_len--) {
hashval = (hashval<<4)^(hashval>>28)^(callpath->ull);
}
if (fullhash) *fullhash = hashval;
hashval = (hashval ^ (hashval>>10) ^ (hashval>>20)) & hashmask;
return hashval;
}
/*--- insert_calltree ---*/
static void
insert_calltree(int tid, drhook_key_t *keyptr)
{
if (tid >= 1 && tid <= numthreads) {
drhook_calltree_t *treeptr = thiscall[tid-1];
while (treeptr->active) {
if (!treeptr->next) {
treeptr->next = calloc_drhook(1,sizeof(drhook_calltree_t));
treeptr->next->prev = treeptr;
}
treeptr = treeptr->next;
}
treeptr->keyptr = keyptr;
treeptr->active = 1;
thiscall[tid-1] = treeptr;
#ifdef HPM
if (opt_hpmprof) {
drhook_key_t *kptr = treeptr->keyptr;
if (!kptr->hpm_stopped) {
stopstart_hpm(tid,
treeptr->prev ? treeptr->prev->keyptr : NULL, /* stop current (i.e. my parent) */
kptr); /* start to gather for me */
kptr->this_delta_wall_child = 0;
kptr->mip_count_in = mip_count(kptr);
kptr->mflop_count_in = mflop_count(kptr);
#ifdef DEBUG
fprintf(stderr,"insert[%.*s@%d]: this_delta_wall_child=%.15g, mip#%.15g, mflop#%.15g\n",
kptr->name_len,kptr->name,
tid,kptr->this_delta_wall_child,
kptr->mip_count_in,kptr->mflop_count_in);
#endif
}
else {
stop_only_hpm(tid,
treeptr->prev ? treeptr->prev->keyptr : NULL /* stop current (i.e. my parent) */);
} /* if (!kptr->hpm_stopped) else */
} /* if (opt_hpmprof) */
#endif
}
}
/*--- remove_calltree ---*/
static void
remove_calltree(int tid, drhook_key_t *keyptr,
const double *delta_wall, const double *delta_cpu)
{
if (tid >= 1 && tid <= numthreads) {
drhook_calltree_t *treeptr = thiscall[tid-1];
if (treeptr->active && treeptr->keyptr == keyptr) {
treeptr->active = 0;
if (treeptr->prev) {
drhook_key_t *parent_keyptr = treeptr->prev->keyptr;
if (parent_keyptr) { /* extra security */
if (opt_walltime) {
parent_keyptr->delta_wall_child += (*delta_wall);
#ifdef HPM
if (opt_hpmprof) parent_keyptr->this_delta_wall_child += (*delta_wall);
#endif
}
if (opt_cputime) {
parent_keyptr->delta_cpu_child += (*delta_cpu);
}
if (opt_memprof) {
/*
const long long int size = 0;
c_drhook_memcounter_(&tid, &size, NULL);
fprintf(stderr,
">parent(%.*s)->mem_child = %lld ; this(%.*s)->alldelta = %lld, mem_child = %lld\n",
parent_keyptr->name_len, parent_keyptr->name, parent_keyptr->mem_child,
keyptr->name_len, keyptr->name, keyptr->maxmem_alldelta, keyptr->mem_child);
*/
parent_keyptr->mem_child = MAX(parent_keyptr->mem_child, keyptr->maxmem_alldelta);
/*
fprintf(stderr,
"<parent(%.*s)->mem_child = %lld ; this(%.*s)->alldelta = %lld, mem_child = %lld\n",
parent_keyptr->name_len, parent_keyptr->name, parent_keyptr->mem_child,
keyptr->name_len, keyptr->name, keyptr->maxmem_alldelta, keyptr->mem_child);
*/
}
} /* if (parent_keyptr) */
thiscall[tid-1] = treeptr->prev;
}
else {
thiscall[tid-1] = calltree[tid-1];
}
#ifdef HPM
if (opt_hpmprof) {
drhook_key_t *kptr = treeptr->keyptr;
if (!kptr->hpm_stopped) {
double this_delta_wall_self = *delta_wall - kptr->this_delta_wall_child;
stopstart_hpm(tid,
kptr,
thiscall[tid-1]->keyptr); /* stop current, (re-)start previous */
/* Calculate moving average of mipsrate & mflops ; divpc we don't bother */
#ifdef DEBUG
fprintf(stderr,"remove[%.*s@%d]: this_delta_wall_self=%.15g i.e. %.15g - %.15g",
kptr->name_len,kptr->name,
tid,this_delta_wall_self,
*delta_wall,kptr->this_delta_wall_child);
#endif
if (this_delta_wall_self > 0) {
long long int hpm_calls = ++kptr->hpm_calls;
double mipsrate, mflops;
kptr->mip_count_in = mip_count(kptr) - kptr->mip_count_in;
kptr->mflop_count_in = mflop_count(kptr) - kptr->mflop_count_in;
mipsrate = kptr->mip_count_in/this_delta_wall_self;
kptr->avg_mipsrate = ((hpm_calls-1)*kptr->avg_mipsrate + mipsrate)/hpm_calls;
mflops = kptr->mflop_count_in/this_delta_wall_self;
kptr->avg_mflops = ((hpm_calls-1)*kptr->avg_mflops + mflops)/hpm_calls;
#ifdef DEBUG
fprintf(stderr,
", mip#%.15g, mflop#%.15g : mipsrate=%.15g, avg=%.15g; mflops=%.15g, avg=%.15g",
kptr->mip_count_in,kptr->mflop_count_in,
mipsrate, kptr->avg_mipsrate,
mflops, kptr->avg_mflops);
#endif
}
#ifdef DEBUG
fprintf(stderr,"\n");
#endif
if (opt_hpmstop_threshold > 0 && kptr->calls == opt_hpmstop_threshold) {
/* check whether hpm should anymore be called for this routine */
if (kptr->avg_mflops < opt_hpmstop_mflops) kptr->hpm_stopped = 1;
}
}
else {
stop_only_hpm(tid,kptr);
} /* if (!kptr->hpm_stopped) else ... */
} /* if (opt_hpmprof) */
#endif
curkeyptr[tid-1] = thiscall[tid-1]->keyptr;
}
else {
curkeyptr[tid-1] = NULL;
} /* if (treeptr->active && treeptr->keyptr == keyptr) else ... */
}
}
/*--- memstat ---*/
static long long int
slave_stacksize()
{
char *env_omp = getenv("OMP_STACKSIZE");
long long int stacksize = env_omp ? atoll(env_omp) : 0;
if (env_omp) {
if (strchr(env_omp,'G')) stacksize *= (long long int)1073741824; /* hence, in GiB */
else if (strchr(env_omp,'M')) stacksize *= (long long int)1048576; /* hence, in MiB */
else if (strchr(env_omp,'K')) stacksize *= (long long int)1024; /* hence, in KiB */
}
if (stacksize < 0) stacksize = 0;
return stacksize;
}
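/* Worked example (illustrative): with OMP_STACKSIZE=512M this routine returns
   512 * 1048576 = 536870912 bytes; a bare number such as OMP_STACKSIZE=4000000 is taken as bytes;
   unparsable or negative values fall back to 0. */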
static void
memstat(drhook_key_t *keyptr, const int *thread_id, int in_getkey)
{
if (any_memstat && keyptr) {
if (opt_gethwm) keyptr->hwm = gethwm_();
if (opt_getrss) {
keyptr->maxrss = getrss_();
keyptr->rssnow = getcurheap_thread_(thread_id);
}
if (opt_getstk) {
long long int stk = getstk_();
keyptr->stack = stk;
keyptr->maxstack = MAX(keyptr->maxstack,stk);
}
if (opt_getpag) keyptr->paging = getpag_();
if (opt_memprof) {
keyptr->mem_seenmax = getmaxcurheap_thread_(thread_id);
if (in_getkey) { /* Upon enter of a Dr.Hook'ed routine */
/* A note for "keyptr->mem_curdelta":
1) do not reset to 0
2) initially calloc'ed to 0 while initializing the keydata[] ~ alias keyptr
3) remember the previous value --> catches memory leaks, too !! */
/* keyptr->mem_curdelta = 0; */
/* Nearly the same holds for "keyptr->mem_child";
we need to capture the maximum/hwm for child */
/* keyptr->mem_child = 0; */
keyptr->paging_in = keyptr->paging;
}
else { /* Upon exit of a Dr.Hook'ed routine */
long long int alldelta = keyptr->mem_curdelta + keyptr->mem_child;
if (alldelta > keyptr->maxmem_alldelta) keyptr->maxmem_alldelta = alldelta;
if (keyptr->paging - keyptr->paging_in > keyptr->mem_maxpagdelta)
keyptr->mem_maxpagdelta = keyptr->paging - keyptr->paging_in;
}
if (keyptr->hwm > keyptr->mem_maxhwm) keyptr->mem_maxhwm = keyptr->hwm;
if (keyptr->maxrss > keyptr->mem_maxrss) keyptr->mem_maxrss = keyptr->maxrss;
if (keyptr->maxstack > keyptr->mem_maxstk) keyptr->mem_maxstk = keyptr->maxstack;
}
}
}
/*--- flptrap ---*/
/*
-----------------------------------------------------------------------
If we are trapping Floating-Point Error, then set the processor in SYNC
modes and enable TRP_INVALID, TRP_DIV_BY_ZERO and TRP_OVERFLOW.
-----------------------------------------------------------------------
*/
#ifdef RS6K
static void
flptrap(int sig, int silent)
{
if (sig == SIGFPE) {
/* From John Hague, IBM, UK (--> thanks a lot, John !!)*/
int ret = fp_trap(FP_TRAP_FASTMODE);
if ((ret == FP_TRAP_UNIMPL) || (ret == FP_TRAP_ERROR)) {
char errmsg[4096];
sprintf(errmsg,
"flptrap(): Call to 'fp_trap' in signal_trap failed (return code = %d)\n (line %d in file %s)\n",
ret, __LINE__, __FILE__);
perror(errmsg);
RAISE(SIGABRT);
}
fp_enable(TRP_INVALID | TRP_DIV_BY_ZERO | TRP_OVERFLOW);
}
}
#elif defined(__GNUC__) && !defined(NO_TRAPFPE)
static void
flptrap(int sig, int silent)
{
if (sig == SIGFPE) {
/* Adapted from www.twinkle.ws/arnaud/CompilerTricks.html#Glibc_FP */
trapfpe(silent); /* No need for pgf90's -Ktrap=fp now ? */
}
}
#else
static void
flptrap(int sig, int silent)
{
return; /* A dummy */
}
#endif
static void signal_gencore(int sig SIG_EXTRA_ARGS);
static void signal_harakiri(int sig SIG_EXTRA_ARGS);
static void signal_drhook(int sig SIG_EXTRA_ARGS);
static void trapfpe_treatment(int sig, int silent);
/*--- catch_signals ---*/
#define CATCHSIG(x) {\
drhook_sig_t *sl = &siglist[x];\
if (sl->active == 0) {\
drhook_sigfunc_t u;\
u.func3args = signal_drhook;\
sl->active = 1;\
sigemptyset(&sl->new.sa_mask);\
sl->new.sa_handler = u.func1args;\
sl->new.sa_flags = SA_SIGINFO;\
sigaction(x,&sl->new,&sl->old);\
trapfpe_treatment(x,silent); \
if (!silent && myproc == 1) {\
int tid = get_thread_id_(); \
char *pfx = PREFIX(tid); \
fprintf(stderr,\
"%s %s [%s@%s:%d] DR_HOOK also catches signal#%d : New handler '%s' installed at %p (old at %p)\n", \
pfx,TIMESTR(tid),FFL, \
x, "signal_drhook", sl->new.sa_handler, sl->old.sa_handler); \
}\
}\
}
static void
catch_signals(int silent)
{
char *env = getenv("DR_HOOK_CATCH_SIGNALS");
if (!silent && myproc == 1) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK_CATCH_SIGNALS=%s\n",
pfx,TIMESTR(tid),FFL,
env ? env : "<undef>");
}
if (env) {
const char delim[] = ", \t/";
char *p, *s = strdup_drhook(env);
p = strtok(s,delim);
while (p) {
int sig = atoi(p);
if (sig >= 1 && sig <= NSIG) {
CATCHSIG(sig);
}
else if (sig == -1) { /* Makes ALL (catchable) signals available to DR_HOOK */
int j;
for (j=1; j<=NSIG; j++) {
CATCHSIG(j);
} /* for (j=1; j<=NSIG; j++) */
break;
}
p = strtok(NULL,delim);
}
free_drhook(s);
}
}
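/* Illustrative example (not part of the original source): the list is parsed with strtok over the
   delimiters ", \t/", so e.g.
     export DR_HOOK_CATCH_SIGNALS="10,12"  -- catch two extra signals (SIGUSR1/SIGUSR2 on Linux)
     export DR_HOOK_CATCH_SIGNALS=-1       -- catch every signal in [1,NSIG]
   Signal numbers are platform dependent; the SIGUSR1/SIGUSR2 mapping above is a Linux assumption. */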
/*--- trapfpe_treatment ---*/
static void
trapfpe_treatment(int sig, int silent)
{
if (sig == SIGFPE) {
#if defined(__GNUC__) && !defined(NO_TRAPFPE)
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
if (drhook_trapfpe) {
if (!silent && myproc == 1) {
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK enables SIGFPE-related floating point trapping since DRHOOK_TRAPFPE=%d\n",
pfx,TIMESTR(tid),FFL,
drhook_trapfpe);
}
flptrap(sig,silent); /* Has FLP-trapping on, regardless */
}
else {
if (!silent && myproc == 1) {
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK turns SIGFPE-related floating point trapping off since DRHOOK_TRAPFPE=%d\n",
pfx,TIMESTR(tid),FFL,
drhook_trapfpe);
}
untrapfpe(silent); /* Turns off a possible -Ktrap=fp from pgf90 */
}
#endif
}
}
/* Fortran callable : calls trapfpe() for slave threads if drhook_trapfpe indicated so
Called from DR_HOOK_UTIL_MULTI after DR_HOOK_UTIL (master thread) has been called
Matters only for slave threads
If *silent = 0, then more verbose output */
void
trapfpe_slave_threads_(const int *silent)
{
int tid = get_thread_id_();
if (tid > 1) { // slave threads
if (drhook_trapfpe_master_init) trapfpe_treatment(SIGFPE, *silent);
}
}
void
trapfpe_slave_threads(const int *silent)
{
trapfpe_slave_threads_(silent);
}
/*--- restore_default_signals ---*/
static void
restore_default_signals(int silent)
{
char *env = getenv("DR_HOOK_RESTORE_DEFAULT_SIGNALS");
if (!silent && myproc == 1) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK_RESTORE_DEFAULT_SIGNALS=%s\n",
pfx,TIMESTR(tid),FFL,
env ? env : "<undef>");
}
if (env) {
int unlim_core = 1;
const char delim[] = ", \t/";
char *p, *s = strdup_drhook(env);
p = strtok(s,delim);
while (p) {
int sig = atoi(p);
if (sig >= 1 && sig <= NSIG) {
drhook_sig_t *sl = &siglist[sig];
if (sl->active == 0) { /* Not touched yet by ignore_signals() */
set_default_handler(sig,unlim_core,(!silent && myproc == 1));
unlim_core = 0;
if (sig == SIGFPE) trapfpe_treatment(sig, (!silent && myproc == 1));
sl->active = -2;
}
}
else if (sig == -1) { /* Restore default signals for all available/catchable to DR_HOOK */
int j;
for (j=1; j<=NSIG; j++) {
drhook_sig_t *sl = &siglist[j];
if (sl->active == 0) { /* Not touched yet by ignore_signals() */
set_default_handler(j,unlim_core,(!silent && myproc == 1));
unlim_core = 0;
if (j == SIGFPE) trapfpe_treatment(j, (!silent && myproc == 1));
sl->active = -2;
}
} /* for (j=1; j<=NSIG; j++) */
break;
}
p = strtok(NULL,delim);
}
free_drhook(s);
}
}
/*--- ignore_signals ---*/
static void
ignore_signals(int silent)
{
char *env = getenv("DR_HOOK_IGNORE_SIGNALS");
if (!silent && myproc == 1) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK_IGNORE_SIGNALS=%s\n",
pfx,TIMESTR(tid),FFL,
env ? env : "<undef>");
}
if (env) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
const char delim[] = ", \t/";
char *p, *s = strdup_drhook(env);
p = strtok(s,delim);
while (p) {
int sig = atoi(p);
if (sig >= 1 && sig <= NSIG) {
drhook_sig_t *sl = &siglist[sig];
if (!silent && myproc == 1) {
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK ignores signal#%d altogether\n",
pfx,TIMESTR(tid),FFL,
sig);
}
sl->active = -1;
}
else if (sig == -1) { /* Switches off ALL signals from DR_HOOK */
int j;
for (j=1; j<=NSIG; j++) {
drhook_sig_t *sl = &siglist[j];
if (!silent && myproc == 1) {
fprintf(stderr,
"%s %s [%s@%s:%d] DR_HOOK ignores signal#%d altogether\n",
pfx,TIMESTR(tid),FFL,
j);
}
sl->active = -1;
} /* for (j=1; j<=NSIG; j++) */
break;
}
p = strtok(NULL,delim);
}
free_drhook(s);
}
}
/*--- gdb__sigdump ---*/
#if (defined(LINUX) || defined(SUN4)) && !defined(XT3) && !defined(XD1) && !defined(_CRAYC)
static void gdb__sigdump(int sig SIG_EXTRA_ARGS)
{
static int who = 0; /* Current owner of the lock, if > 0 */
int is_set = 0;
int it = get_thread_id_();
drhook_sig_t *sl = &siglist[sig];
char *pfx = PREFIX(it);
coml_test_lockid_(&is_set, &DRHOOK_lock);
if (is_set && who == it) {
fprintf(stderr,"%s %s [%s@%s:%d] Received (another) signal#%d (%s)\n",
pfx,TIMESTR(it),FFL,
sig,sl->name);
fprintf(stderr,"%s %s [%s@%s:%d] Recursive calls by the same thread#%d not allowed. Bailing out\n",
pfx,TIMESTR(it),FFL,
it);
return;
}
if (!is_set) coml_set_lockid_(&DRHOOK_lock);
who = it;
fprintf(stderr,"%s %s [%s@%s:%d] Received signal#%d(%s) : sigcontextptr=%p\n",
pfx,TIMESTR(it),FFL,
sig,sl->name,sigcontextptr);
LinuxTraceBack(pfx,TIMESTR(it),sigcontextptr);
/* LinuxTraceBack(pfx,TIMESTR(tid),NULL); */
who = 0;
coml_unset_lockid_(&DRHOOK_lock);
}
#endif
/*--- signal_drhook ---*/
#define SETSIG5(x,ignore_flag,handler_name,preserve_old,xstr) { \
drhook_sig_t *sl = &siglist[x]; \
if (sl->active == 0) { \
drhook_sigfunc_t u; \
u.func3args = handler_name; \
sl->active = 1; \
strcpy(sl->name,xstr); \
sigemptyset(&sl->new.sa_mask); \
sl->new.sa_handler = u.func1args; \
sl->new.sa_flags = SA_SIGINFO; \
sigaction(x,&sl->new,preserve_old ? &sl->old : NULL); \
sl->ignore_atexit = ignore_flag; \
trapfpe_treatment(x,silent); \
if (!silent && myproc == 1) { \
int tid = get_thread_id_(); \
char *pfx = PREFIX(tid); \
const char fmt[] = "%s %s [%s@%s:%d] New signal handler '%s' for signal#%d (%s) at %p (old at %p)\n"; \
fprintf(stderr,fmt, \
pfx,TIMESTR(tid),FFL, \
#handler_name, \
x, sl->name, \
sl->new.sa_handler, \
preserve_old ? sl->old.sa_handler : NULL); \
} \
} \
}
#define SETSIG(x,ignore_flag) SETSIG5(x,ignore_flag,signal_drhook,1,#x)
#define JSETSIG(x,ignore_flag) { \
drhook_sig_t *sl = &siglist[x]; \
drhook_sigfunc_t u; \
/* fprintf(stderr,"JSETSIG: sl->active = %d\n",sl->active); */ \
u.func3args = signal_harakiri; \
sl->active = 1; \
strcpy(sl->name,#x); \
sigemptyset(&sl->new.sa_mask); \
sl->new.sa_handler = u.func1args; \
sl->new.sa_flags = SA_SIGINFO; \
sigaction(x,&sl->new,&sl->old); \
sl->ignore_atexit = ignore_flag; \
trapfpe_treatment(x,0); \
}
#if 0
{ \
int tid = get_thread_id_(); \
char *pfx = PREFIX(tid); \
const char fmt[] = "%s %s [%s@%s:%d] Harakiri signal handler '%s' for signal#%d (%s) installed at %p (old at %p)\n"; \
fprintf(stderr,fmt, \
pfx,TIMESTR(tid),FFL, \
"signal_harakiri", \
x, sl->name, \
sl->new.sa_handler, \
sl->old.sa_handler); \
} \
#endif
#if defined(RS6K) && defined(__64BIT__)
#define DRH_STRUCT_RLIMIT struct rlimit64
#define DRH_GETRLIMIT getrlimit64
#define DRH_SETRLIMIT setrlimit64
#else
#define DRH_STRUCT_RLIMIT struct rlimit
#define DRH_GETRLIMIT getrlimit
#define DRH_SETRLIMIT setrlimit
#endif
static int set_unlimited_corefile(unsigned long long int *hardlimit)
{
/*
Make sure we *only* set soft-limit (not hard-limit) to 0 in our scripts i.e. :
$ ulimit -S -c 0
but *not*
$ ulimit -c 0
See man ksh or man bash for more
*/
int rc = -1;
if (unlimited_corefile_retcode == 9999) { /* Done only once */
DRH_STRUCT_RLIMIT r;
if (DRH_GETRLIMIT(RLIMIT_CORE, &r) == 0) {
r.rlim_cur = r.rlim_max;
if (DRH_SETRLIMIT(RLIMIT_CORE, &r) == 0) {
saved_corefile_hardlimit = r.rlim_cur;
rc = 0;
}
}
unlimited_corefile_retcode = rc;
}
if (hardlimit) *hardlimit = saved_corefile_hardlimit;
rc = unlimited_corefile_retcode;
return rc;
}
static void
signal_gencore(int sig SIG_EXTRA_ARGS)
{
if (opt_gencore > 0) {
opt_gencore = 0; /* A tiny chance for a race condition between threads */
if (sig == opt_gencore_signal && sig >= 1 && sig <= NSIG) {
signal(sig, SIG_IGN);
signal(SIGABRT, SIG_DFL);
{ /* Enable unlimited cores (up to hard-limit) and call abort() --> generates core dump */
if (set_unlimited_corefile(NULL) == 0) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
fprintf(stderr,
"%s %s [%s@%s:%d] Received signal#%d and now calling abort() ...\n",
pfx,TIMESTR(tid),FFL,
sig);
LinuxTraceBack(pfx,TIMESTR(tid),NULL);
abort(); /* Dump core, too */
}
}
/* Should never end up here */
fflush(NULL);
_exit(128+ABS(sig));
} /* if (sig >= 1 && sig <= NSIG && sig == opt_gencore_signal) */
}
}
static char *safe_llitoa(long long int i, char b[], int blen)
{
char const digit[] = "0123456789";
char *p = b;
long long int shifter;
if (i < 0) {
*p++ = '-';
i *= -1;
}
shifter = i;
do { /* Move to where representation ends */
++p;
shifter = shifter/10;
} while (shifter);
*p = '\0';
do{ /* Move back, inserting digits as you go */
*--p = digit[i%10];
i = i/10;
} while (i);
return b;
}
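/* Worked example (illustrative): safe_llitoa(-42, buf, sizeof(buf)) writes "-42" into buf and
   returns buf; safe_llitoa(0, ...) yields "0". The buffer is assumed large enough -- the blen
   argument is not checked. */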
static void
signal_harakiri(int sig SIG_EXTRA_ARGS)
{
/* A signal handler that will force to exit the current thread immediately for sure */
/* The following output should be malloc-free */
time_t tp;
int idummy;
int fd = fileno(stderr);
int tid = get_thread_id_();
int nsigs = TIDNSIGS(tid);
char *pfx = PREFIX(tid);
char buf[128];
char s[1024];
strcpy(s,pfx);
/* [%s@%s:%d] for FFL below */
strcat(s," [");
strcat(s,__FUNCTION__);
strcat(s,"@");
strcat(s,__FILE__);
strcat(s,":");
strcat(s,safe_llitoa(__LINE__,buf,sizeof(buf)));
strcat(s,"] [epoch=");
time(&tp);
strcat(s,safe_llitoa(tp,buf,sizeof(buf)));
strcat(s,"] Terminating process to avoid hangs due to signal#");
strcat(s,safe_llitoa(sig,buf,sizeof(buf)));
strcat(s," by raising signal SIGKILL = ");
strcat(s,safe_llitoa(SIGKILL,buf,sizeof(buf)));
strcat(s,", nsigs = ");
strcat(s,safe_llitoa(nsigs,buf,sizeof(buf)));
idummy = write(fd,s,strlen(s));
#if 0
batch_kill_();
#endif
raise(SIGKILL); /* Use raise, not RAISE here */
_exit(128+ABS(sig)); /* Should never reach here, but in case it does, then ... */
}
static void
signal_drhook(int sig SIG_EXTRA_ARGS)
{
volatile int nfirst = drhook_use_lockfile ? 0 : 1;
int nsigs;
int trace_size;
int tid;
pid_t unixtid;
char *pfx;
void *trace[GNUC_BTRACE];
// Let only one ("fastest") thread per task do this error processing
static volatile sig_atomic_t been_here_already = 0;
static volatile sig_atomic_t thing = 0;
if (sig < 1 || sig > NSIG) return; // .. since we have seen this happen, too :-(
if (been_here_already++ > 0) return; // avoid calling more than once ... since it more often than not leads to trouble
cas_lock(&thing);
trace_size = backtrace(trace, GNUC_BTRACE);
unixtid = gettid();
tid = get_thread_id_();
pfx = PREFIX(tid);
if (signals_set && sig >= 1 && sig <= NSIG) {
drhook_sig_t *sl = &siglist[sig];
sigset_t newmask, oldmask;
/* A tiny chance for a race condition between threads */
// Using compare-and-swap -stuff from the include cas.h (also in ecProf)
/* Signal catching */
{
nsigs = (++signal_handler_called);
if (sl->ignore_atexit) signal_handler_ignore_atexit++;
}
if (ec_drhook && tid >= 1 && tid <= numthreads) ec_drhook[tid-1].nsigs = nsigs; /* Store for possible signal_harakiri() */
/*------------------------------------------------------------
Strategy:
- drhook intercepts most interrupts.
- 1st interrupt will
- call alarm(drhook_harakiri_timeout) to try to make sure a 2nd interrupt is received
- try to call tracebacks and exit (which includes atexits)
- 2nd (and subsequent) interrupts will
- spin for harakiri-timeout + 60 sec (to give the 1st interrupt time to complete tracebacks)
- and then call _exit (bypassing atexit)
------------------------------------------------------------*/
/* if (sig != SIGTERM) signal(SIGTERM, SIG_DFL); */ /* Let the default SIGTERM to occur */
coml_get_max_threads_(&max_threads);
if (nsigs == 1) {
/*---- First call to signal handler: call alarm(drhook_harakiri_timeout), tracebacks, exit ------*/
if (!nfirst) {
const char drhook_lockfile[] = "drhook_lock";
if (access(drhook_lockfile,F_OK) == -1) {
int fd = open(drhook_lockfile,O_RDONLY);
if (fd == -1) { // File did not exist -- create it
fd = open(drhook_lockfile, O_CREAT|O_WRONLY|O_TRUNC|O_EXCL, S_IRUSR|S_IWUSR);
if (fd >= 0) {
int rc_lock = flock(fd, LOCK_EX | LOCK_NB);
if (rc_lock == 0) {
size_t count = sizeof(myproc);
ssize_t sz = write(fd,&myproc,count);
if (sz == count) nfirst = 1;
//rc_lock = flock(fd, LOCK_UN);
}
close(fd);
}
}
else { // after all the file already existed
close(fd);
}
}
}
if (nfirst) {
/* Enjoy some output (only from the first guy that came in) */
long long int hwm = gethwm_();
long long int rss = getmaxrss_();
long long int maxstack = getmaxstk_();
long long int vmpeak = getvmpeak_();
long long int pag = getpag_();
rss /= 1048576;
hwm /= 1048576;
maxstack /= 1048576;
vmpeak /= 1048576;
fprintf(stderr,
"%s %s [%s@%s:%d] Received signal#%d (%s) :: %lldMB (heap),"
" %lldMB (maxrss), %lldMB (maxstack), %lldMB (vmpeak), %lld (paging), nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig, sl->name, hwm, rss, maxstack, vmpeak, pag, nsigs);
#if 0
fprintf(stderr,
"%s %s [%s@%s:%d] Also activating Harakiri-alarm (SIGALRM=%d) to expire after %ds elapsed to prevent hangs, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
SIGALRM,drhook_harakiri_timeout,nsigs);
#endif
}
JSETSIG(SIGALRM,1); /* This will now set another signal handler than signal_drhook */
fflush(NULL);
alarm(drhook_harakiri_timeout);
#if defined(SA_SIGINFO) && SA_SIGINFO > 0
if (sigcode) {
const char *s = NULL;
void *addr = sigcode->si_addr;
void *bt = addr;
ucontext_t *uc = (ucontext_t *)sigcontextptr;
#ifdef __powerpc64__
bt = uc ? (void *) uc->uc_mcontext.regs->nip : NULL; // Trick from PAPI_overflow()
#elif defined(__x86_64__) && defined(REG_RIP) // gcc specific
bt = uc ? (void *) uc->uc_mcontext.gregs[REG_RIP] : NULL; // RIP: x86_64 specific ; only available in 64-bit mode */
#elif defined(__i386__) && defined(REG_EIP) // gcc specific
bt = uc ? (void *) uc->uc_mcontext.gregs[REG_EIP] : NULL; // EIP: x86 specific ; only available in 32-bit mode */
#endif
if (!addr) addr = bt;
if (sig == SIGFPE) {
switch (sigcode->si_code) {
case FPE_INTDIV: s = "integer divide by zero"; break;
case FPE_INTOVF: s = "integer overflow"; break;
case FPE_FLTDIV: s = "floating-point divide by zero"; break;
case FPE_FLTOVF: s = "floating-point overflow"; break;
case FPE_FLTUND: s = "floating-point underflow"; break;
case FPE_FLTRES: s = "floating-point inexact result"; break;
case FPE_FLTINV: s = "floating-point invalid operation"; break;
case FPE_FLTSUB: s = "subscript out of range"; break;
default:
s = "unrecognized si_code for SIGFPE"; break;
}
}
else if (sig == SIGILL) {
switch (sigcode->si_code) {
case ILL_ILLOPC: s = "illegal opcode"; break;
case ILL_ILLOPN: s = "illegal operand"; break;
case ILL_ILLADR: s = "illegal addressing mode"; break;
case ILL_ILLTRP: s = "illegal trap"; break;
case ILL_PRVOPC: s = "privileged opcode"; break;
case ILL_PRVREG: s = "privileged register"; break;
case ILL_COPROC: s = "coprocessor error"; break;
case ILL_BADSTK: s = "internal stack error"; break;
default:
s = "unrecognized si_code for SIGILL"; break;
}
}
else if (sig == SIGSEGV) {
switch (sigcode->si_code) {
case SEGV_MAPERR: s = "address not mapped to object"; break;
case SEGV_ACCERR: s = "invalid permissions for mapped object"; break;
default:
s = "unrecognized si_code for SIGSEGV"; break;
}
}
else if (sig == SIGBUS) {
switch (sigcode->si_code) {
case BUS_ADRALN: s = "invalid address alignment"; break;
case BUS_ADRERR: s = "nonexistent physical address"; break;
case BUS_OBJERR: s = "object-specific hardware error"; break;
default:
s = "unrecognized si_code for SIGBUS"; break;
}
}
else {
s = "unrecognized si_code";
}
if (s) {
#ifdef __USE_GNU
int works = 0;
Dl_info dlinfo;
if (dladdr(bt,&dlinfo) == 0) {
dlinfo.dli_fname = NULL;
dlinfo.dli_sname = NULL;
dlinfo.dli_fbase = 0;
}
else
works = 1;
if (sig == SIGFPE) {
int excepts = fegetexcept();
fprintf(stderr,
"%s %s [%s@%s:%d] Signal#%d was caused by %s [memaddr=%p] [excepts=0x%x [%d]] : %p at %s(%s), nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig, s,
addr,
excepts, excepts,
bt,
dlinfo.dli_fname ? dlinfo.dli_fname : "<unknown_object>",
dlinfo.dli_sname ? dlinfo.dli_sname : "<unknown_function>",
nsigs);
}
else {
fprintf(stderr,
"%s %s [%s@%s:%d] Signal#%d was caused by %s [memaddr=%p] : %p at %s(%s), nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig, s,
addr,
bt,
dlinfo.dli_fname ? dlinfo.dli_fname : "<unknown_object>",
dlinfo.dli_sname ? dlinfo.dli_sname : "<unknown_function>",
nsigs);
}
if (works && trace_size > 0) {
int ndigits = (trace_size > 0) ? 1 + (int)log10(trace_size) : 0;
int jt;
for (jt = 0; jt < trace_size; ++jt) {
void *pbt = trace[jt];
if (dladdr(pbt,&dlinfo) == 0) {
dlinfo.dli_fname = NULL;
dlinfo.dli_sname = NULL;
dlinfo.dli_fbase = 0;
}
fprintf(stderr,
"%s %s [%s@%s:%d] : [%*.*d]: %s %s %p %p # addr2line\n",
pfx,TIMESTR(tid),FFL,
ndigits, ndigits, jt,
dlinfo.dli_sname ? dlinfo.dli_sname : "<unknown_function>",
dlinfo.dli_fname ? dlinfo.dli_fname : "<unknown_object>",
dlinfo.dli_fbase,
pbt);
}
}
#else
fprintf(stderr,
"%s %s [%s@%s:%d] Signal#%d was caused by %s [memaddr=%p], nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig, s,
addr,
nsigs);
#endif
fflush(NULL);
}
}
#endif
}
if (nsigs > 1 || !nfirst) {
/*----- 2nd (and subsequent) calls to signal handler: spin harakiri-timeout + 60 sec, _exit ---------*/
int offset = 60;
int secs = drhook_harakiri_timeout+offset;
if (!drhook_use_lockfile) { /* Less output if lockfile was used ... */
fprintf(stderr,
"%s %s [%s@%s:%d] Calling signal_harakiri upon receipt of signal#%d"
" after %ds spin, nsigs = %d, nfirst = %d\n",
pfx,TIMESTR(tid),FFL,
sig,secs,nsigs,nfirst);
fflush(NULL);
}
spin(secs);
signal_harakiri(sig SIG_PASS_EXTRA_ARGS);
}
/* All below this point should be nsigs == 1 i.e. the first thread arriving at signal_drhook() */
#ifdef RS6K
/*-- llcancel attempted but sometimes hangs ---
{
char *env = getenv("LOADL_STEP_ID");
if (env) {
char *cancel = "delayed_llcancel ";
char cmd[80];
sprintf(cmd,"%s %s &",cancel,env);
fprintf(stderr,"tid#%d issuing command: %s\n",tid,cmd;
fflush(NULL);
system(cmd);
}
}
------------------------------------*/
#endif
/* sigfillset(&newmask); -- dead code since sigprocmask() was not called */
/*
sigemptyset(&newmask);
sigaddset(&newmask, sig);
*/
/* Start critical region (we don't want any signals to interfere while doing this) */
/* sigprocmask(SIG_BLOCK, &newmask, &oldmask); */
if (nsigs == 1 && nfirst) {
/* Print Dr.Hook traceback */
const int ftnunitno = 0; /* stderr */
const int print_option = 2; /* calling tree */
int level = 0;
fprintf(stderr,
"%s %s [%s@%s:%d] Starting DrHook backtrace for signal#%d, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig,nsigs);
dump_hugepages(0,pfx,tid,sig,nsigs); /* We don't want to enforce anymore -- thus the first arg == 0 now */
if (drhook_dump_smaps) {
char filename[64];
snprintf(filename,sizeof(filename),"/proc/%ld/smaps",(long)unixtid);
dump_file(pfx,tid,sig,nsigs,filename);
}
if (drhook_dump_maps) {
char filename[64];
snprintf(filename,sizeof(filename),"/proc/%ld/maps",(long)unixtid);
dump_file(pfx,tid,sig,nsigs,filename);
}
if (drhook_dump_buddyinfo) {
dump_file(pfx,tid,sig,nsigs,"/proc/buddyinfo");
}
if (drhook_dump_meminfo) {
dump_file(pfx,tid,sig,nsigs,"/proc/meminfo");
}
fflush(NULL);
c_drhook_print_(&ftnunitno, &tid, &print_option, &level);
fflush(NULL);
/* To make it less likely that another thread generates a signal while we are
doing a traceback lets wait a while (seems to fix problems of the traceback
terminating abnormally. Probably a better way of doing this involving holding
off signals but sigprocmask is not safe in multithreaded code - P Towers Dec 10 2012
This was originally an issue with the Intel compiler but may be of benefit for other
compilers. Cannot see it doing harm - P Towers Aug 29 2013 */
spin(MIN(5,tid));
if (sig != SIGABRT && sig != SIGTERM) {
#ifdef RS6K
xl__sigdump(sig SIG_PASS_EXTRA_ARGS); /* Can't use xl__trce(...), since it also stops */
#endif
#if 1
/* Active code ? */
#if (defined(LINUX) || defined(SUN4)) && !defined(XT3) && !defined(XD1)
LinuxTraceBack(pfx,TIMESTR(tid),NULL);
#endif
#else
/* Dead code ? */
#if (defined(LINUX) || defined(SUN4)) && !defined(XT3) && !defined(XD1) && !defined(_CRAYC)
gdb__sigdump(sig SIG_PASS_EXTRA_ARGS);
#endif
#endif
#ifdef __INTEL_COMPILER
intel_trbk_(); /* from ../utilities/gentrbk.F90 */
#endif
#if defined(NECSX)
necsx_trbk_("signal_drhook",13); /* from ../utilities/gentrbk.F90 */
#endif
}
#ifdef VPP
#if defined(SA_SIGINFO) && SA_SIGINFO > 0
_TraceCalls(sigcontextptr); /* Need VPP's libmp.a by Pierre Lagier */
#endif
#endif
fprintf(stderr,
"%s %s [%s@%s:%d] DrHook backtrace done for signal#%d, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig,nsigs);
fflush(NULL);
}
/* sigprocmask(SIG_SETMASK, &oldmask, 0); */
/* End critical region : the original signal state restored */
{
int restored = 0, tdiff;
time_t t1, t2;
drhook_sigfunc_t u;
u.func3args = signal_drhook;
if (opt_propagate_signals &&
sl->old.sa_handler != SIG_DFL &&
sl->old.sa_handler != SIG_IGN &&
sl->old.sa_handler != u.func1args) {
u.func1args = sl->old.sa_handler;
if (atp_enabled) {
/* Restore the default, core-file creating action to these "ATP" recognized signals */
switch (sig) {
case SIGTERM:
if (atp_ignore_sigterm) break; /* SIGSEGV not reset to SIG_DFL as ATP now ignores SIGTERM */
/* Fall thru (see man atp on Cray) */
case SIGINT: /* Also, see ifssig.c : used as a RESTART signal, confusingly enough */
case SIGFPE:
case SIGILL:
case SIGTRAP:
case SIGABRT:
case SIGBUS:
case SIGSEGV:
case SIGSYS:
case SIGXCPU:
#if defined(SIGXFSZ)
case SIGXFSZ:
#endif
fprintf(stderr,
"%s %s [%s@%s:%d] Resetting SIGSEGV (%d) to "
"default signal handler (SIG_DFL) before calling ATP for signal#%d, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
SIGSEGV,sig,nsigs);
set_default_handler(SIGSEGV,1,1);
restored = 1;
break;
default:
break;
}
}
fprintf(stderr,
"%s %s [%s@%s:%d] Calling previous signal handler at %p for signal#%d, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
u.func1args,sig,nsigs);
time(&t1);
u.func3args(sig SIG_PASS_EXTRA_ARGS); /* This could now be the ATP */
time(&t2);
tdiff = (t2 - t1);
fprintf(stderr,
"%s %s [%s@%s:%d] Returned from previous signal handler"
" (at %p, signal#%d, time taken = %ds), nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
u.func1args,sig,tdiff,nsigs);
if (atp_enabled && restored && atp_max_cores > 0) {
/* Assuming it was indeed ATP, then lets spin a bit to allow other cores be dumped */
int secs = MIN(drhook_harakiri_timeout,atp_max_analysis_time);
int grace = 60;
secs = 60 + MIN(tdiff * (atp_max_cores-1),secs);
if (secs > 0) {
fprintf(stderr,
"%s %s [%s@%s:%d] Before aborting (signal#%d) spin %ds (incl. grace %ds)"
" to give ATP time to write all #%d core file(s), nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig,secs,grace,atp_max_cores,nsigs);
spin(secs);
}
}
if (sig != SIGABRT && sig != SIGTERM) {
if (atp_enabled && atp_max_cores > 0) {
fprintf(stderr,
"%s %s [%s@%s:%d] DrHook calls abort() and attempts to dump core (signal#%d), nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
sig,nsigs);
set_default_handler(SIGABRT,1,1);
abort();
}
}
/* Now proceed to definitive _exit() */
}
else {
fprintf(stderr,
"%s %s [%s@%s:%d] Not configured (DR_HOOK_PROPAGATE_SIGNALS=%d) or "
"can't call previous signal handler (for signal#%d) in the chain at %p, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
opt_propagate_signals,sig,
sl->old.sa_handler,nsigs);
}
}
}
{
int errcode = 128 + ABS(sig);
/* Make sure that the process/thread really exits now -- immediately !! */
fprintf(stderr, "%s %s [%s@%s:%d] Error _exit(%d) upon receipt of signal#%d, nsigs = %d\n",
pfx,TIMESTR(tid),FFL,
errcode,sig,nsigs);
fflush(NULL);
_exit(errcode);
}
cas_unlock(&thing);
}
void
c_drhook_set_mpi_()
{
dr_hook_procinfo_(&myproc, &nproc);
}
void
c_drhook_not_mpi_()
{
/* Emulates in a one call : export DR_HOOK_NOT_MPI=1" */
/* To have a desired effect, call BEFORE the very first call to DR_HOOK */
static char s[] = "DR_HOOK_NOT_MPI=1"; /* note: must be static */
putenv(s);
}
/*--- signal_drhook_init ---*/
static void
signal_drhook_init(int enforce)
{
char *env = getenv("DR_HOOK_SILENT");
int silent = env ? atoi(env) : 0;
int j;
dr_hook_procinfo_(&myproc, &nproc);
if (myproc < 1) myproc = 1; /* Just to enable output as if myproc was == 1 */
/* Signals may not yet been set, since MPI not initialized
Only enforce-parameter can enforce to set these => no output on myproc=1 */
if (!enforce && (myproc < 1 || nproc < 0)) return;
if (signals_set) return; /* Extra safety */
/* To prevent sumpini.F90 (f.ex.) from initializing DrHook-signals in case
DR_HOOK was turned off (=0), also set export DR_HOOK_INIT_SIGNALS=0 */
env = getenv("DR_HOOK_INIT_SIGNALS");
if (env && *env == '0') {
signals_set = 2; /* Pretend they are set */
return; /* Never initialize signals via DrHook (dangerous, but sometimes necessary) */
}
if (!ec_drhook) {
int slen;
char hostname[EC_HOST_NAME_MAX];
char *pdot;
int ntids = 1;
coml_get_max_threads_(&ntids);
numthreads = ntids;
ec_drhook = calloc_drhook(ntids, sizeof(*ec_drhook));
slen = sizeof(ec_drhook[0].s);
timestr_len = sizeof(ec_drhook[0].timestr);
if (gethostname(hostname,sizeof(hostname)) != 0) strcpy(hostname,"unknown");
pdot = strchr(hostname,'.');
if (pdot) *pdot = '\0'; // cut short from "." char e.g. hostname.fmi.fi becomes just "hostname"
if (myproc == 1) {
fprintf(stderr,"[EC_DRHOOK:hostname:myproc:omptid:pid:unixtid] [YYYYMMDD:HHMMSS:epoch:walltime] [function@file:lineno] -- Max OpenMP threads = %d\n",ntids);
}
#if 1
{
extern void run_fortran_omp_parallel_ipfstr_(const int *,
void (*func)(const char *, int),
const char *, int);
run_fortran_omp_parallel_ipfstr_(&ntids,set_ec_drhook_label,hostname,strlen(hostname));
}
#else
#pragma omp parallel num_threads(ntids)
{
set_ec_drhook_label(hostname,strlen(hostname));
}
#endif
}
env = getenv("ATP_ENABLED");
atp_enabled = (env && *env == '1') ? 1 : 0;
if (atp_enabled) {
env = getenv("ATP_MAX_CORES");
if (env) atp_max_cores = atoi(env);
env = getenv("ATP_MAX_ANALYSIS_TIME");
if (env) atp_max_analysis_time = atoi(env);
env = getenv("ATP_IGNORE_SIGTERM");
if (env) atp_ignore_sigterm = atoi(env);
if (!silent && myproc == 1) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
fprintf(stderr,"%s %s [%s@%s:%d] ATP_ENABLED=%d\n",pfx,TIMESTR(tid),FFL,atp_enabled);
fprintf(stderr,"%s %s [%s@%s:%d] ATP_MAX_CORES=%d\n",pfx,TIMESTR(tid),FFL,atp_max_cores);
fprintf(stderr,"%s %s [%s@%s:%d] ATP_MAX_ANALYSIS_TIME=%d\n",pfx,TIMESTR(tid),FFL,atp_max_analysis_time);
fprintf(stderr,"%s %s [%s@%s:%d] ATP_IGNORE_SIGTERM=%d\n",pfx,TIMESTR(tid),FFL,atp_ignore_sigterm);
}
}
process_options();
for (j=1; j<=NSIG; j++) { /* Initialize */
drhook_sig_t *sl = &siglist[j];
sprintf(sl->name, "DR_HOOK_SIG#%d", j);
sl->active = 0;
sl->ignore_atexit = 0;
}
ignore_signals(silent); /* These signals will not be handled by DR_HOOK */
restore_default_signals(silent); /* These signals will be restored with SIG_DFL status (regardless of whether they are to be caught by DrHook, ATP or anything else) */
SETSIG(SIGABRT,0); /* Good to be first */
SETSIG(SIGBUS,0);
SETSIG(SIGSEGV,0);
#if defined(SIGEMT)
SETSIG(SIGEMT,0);
#endif
#if defined(SIGSTKFLT)
SETSIG(SIGSTKFLT,0); /* Stack fault */
#endif
#if !defined(NECSX)
/* For the moment turn off these on NEC SX ... */
SETSIG(SIGFPE,0);
SETSIG(SIGILL,0);
#endif
SETSIG(SIGTRAP,0); /* Should be switched off when used with debuggers */
// SETSIG(SIGINT,0); /* Also, see ifssig.c : used as a RESTART signal, confusingly enough */
if (atp_enabled) {
/* We let ATP catch SIGQUIT (it uses this for non-failed tasks, we think) -- thus commented out */
/* SETSIG(SIGQUIT,0); */
/* Unless ATP ignores SIGTERM, we ignore it from DrHook -- thus conditionally commented out */
if (atp_ignore_sigterm) SETSIG(SIGTERM,0); /* Means: DrHook does NOT ignore SIGTERM -- ATP does */
}
else {
SETSIG(SIGQUIT,0);
SETSIG(SIGTERM,0);
}
#if defined(SIGIOT)
SETSIG(SIGIOT,0); /* Same as SIGABRT; Used to be a typo SIGIO ;-( */
#endif
SETSIG(SIGXCPU,1); /* ignore_atexit == 1 i.e. no profile info via atexit() */
#if defined(SIGXFSZ)
SETSIG(SIGXFSZ,0);
#endif
#if defined(SIGDANGER)
SETSIG(SIGDANGER,1); /* To catch the place where paging space gets dangerously low */
#endif
SETSIG(SIGSYS,0);
/* SETSIG(SIGCHLD); we may not want to catch this either; may interfere with parallel processing */
/* -- not active
SETSIG(SIGCHLD);
SETSIG(SIGHUP);
SETSIG(SIGCONT);
*/
#if defined(SIGCORE)
SETSIG(SIGCORE,0); /* NEC SX core dumping */
#endif
#if defined(SIGDEAD)
SETSIG(SIGDEAD,0); /* NEC SX dead lock */
#endif
#if defined(SIGXMEM)
SETSIG(SIGXMEM,0); /* NEC SX exceeded memory size limit */
#endif
#if defined(SIGXDSZ)
SETSIG(SIGXDSZ,0); /* NEC SX exceeded data size limit */
#endif
#if defined(SIGMEM32)
SETSIG(SIGMEM32,0); /* NEC SX exceeded memory size limit of 32KB */
#endif
#if defined(SIGNMEM)
SETSIG(SIGNMEM,0); /* NEC SX error: no memory */
#endif
#if defined(SIGXABT)
SETSIG(SIGXABT,0); /* NEC SX distributed parallel program aborted */
#endif
/*
#if defined(SIG)
SETSIG(SIG,0);
#endif
*/
catch_signals(silent); /* Additional signals to be seen by DR_HOOK */
if (opt_gencore > 0 && opt_gencore_signal >= 1 && opt_gencore_signal <= NSIG) {
drhook_sigfunc_t u;
u.func3args = signal_gencore;
signal(opt_gencore_signal, u.func1args); /* A facility to dump core */
}
signals_set = 1; /* Signals are set now */
}
/*--- get_mon_out ---*/
static char *
get_mon_out(int me)
{
char *s = mon_out;
if (mon_out_procs == me || (mon_out_procs == -1 && me >= 1 && me <= nproc)) {
if (!mon_out) mon_out = strdup_drhook("drhook.prof.%d");
s = malloc_drhook((strlen(mon_out) + 20) * sizeof(*s));
sprintf(s,mon_out,me);
}
if (!s) s = strdup_drhook("drhook.prof.0");
return s;
}
/*--- get_memmon_out ---*/
static char *
get_memmon_out(int me)
{
char *s = NULL;
char *p = get_mon_out(me);
if (p) {
s = malloc_drhook((strlen(p) + 5) * sizeof(*s));
sprintf(s,"%s-mem",p);
}
if (!s) s = strdup_drhook("drhook.prof.0-mem");
return s;
}
/*--- random_memstat ---*/
static void
random_memstat(int tid, int enforce)
{
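  /* Occasionally (on average every opt_random_memstat-th call, or always when 'enforce'
     is set) sample the max heap and stack usage of thread 1; if the stack high-water mark
     exceeds the DR_HOOK_TRACE_STACK threshold, print a diagnostic and abort. */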
if (tid == 1 && opt_random_memstat > 0 && opt_random_memstat <= RAND_MAX) {
int random_number = rand();
if (enforce || random_number % opt_random_memstat == 0) {
long long int maxhwm = getmaxhwm_();
long long int maxstk = getmaxstk_();
if (drhook_stacksize_threshold > 0 && maxstk > drhook_stacksize_threshold) {
/* Abort hopefully with traceback */
char *pfx = PREFIX(tid);
long long int vmpeak = getvmpeak_() / (long long int) 1048576;
long long int threshold = drhook_stacksize_threshold / (long long int) 1048576;
long long int ompstk = drhook_omp_stacksize / (long long int) 1048576;
maxstk /= (long long int) 1048576;
maxhwm /= (long long int) 1048576;
fprintf(stderr,
"%s %s [%s@%s:%d] Stack usage [MB] very high : %lld > %lld (= %g x OMP_STACKSIZE=%lld ; maxhwm=%lld ; vmpeak=%lld)\n",
pfx,TIMESTR(tid),FFL,
maxstk,threshold,
opt_trace_stack,ompstk,
maxhwm,vmpeak);
RAISE(SIGABRT);
}
}
}
}
/*--- process_options ---*/
static void do_prof();
void /* Fortran callable */
c_drhook_process_options_(const int *lhook, const int *Myproc, const int *Nproc)
{
c_drhook_set_lhook_(lhook);
if (Myproc) myproc = *Myproc;
if (Nproc) nproc = *Nproc;
process_options();
}
#define OPTPRINT(fp,...) if (fp) fprintf(fp,__VA_ARGS__)
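/* OPTPRINT writes only when fp is non-NULL, i.e. when DR_HOOK_SHOW_PROCESS_OPTIONS
   selects this MPI task (-1 = every task; the default is task 1 only). */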
static void
process_options()
{
char *pfx = "";
char *env;
FILE *fp = NULL;
int tid, ienv, newline;
static int processed = 0;
if (processed) return;
tid = get_thread_id_();
env = getenv("DR_HOOK_SHOW_PROCESS_OPTIONS");
ienv = env ? atoi(env) : 1;
if (ienv == -1 || ienv == myproc) fp = stderr;
if (fp) pfx = PREFIX(tid);
OPTPRINT(fp,"%s %s [%s@%s:%d] fp = %p\n",pfx,TIMESTR(tid),FFL,fp);
env = getenv("DR_HOOK_ALLOW_COREDUMP");
if (env) {
ienv = atoi(env);
allow_coredump = (ienv == -1 || ienv == myproc) ? ienv : 0;
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_ALLOW_COREDUMP=%d\n",pfx,TIMESTR(tid),FFL,allow_coredump);
if (allow_coredump) {
unsigned long long int hardlimit = 0;
int rc = set_unlimited_corefile(&hardlimit);
if (rc == 0) {
OPTPRINT(fp,"%s %s [%s@%s:%d] Hardlimit for core file is now %llu (0x%llx)\n",
pfx,TIMESTR(tid),FFL,hardlimit,hardlimit);
}
}
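  /* DR_HOOK_PROFILE gives the profile file name pattern; if it contains no '%', ".%d" is
     appended so that each MPI task writes its own file -- e.g. a hypothetical
     "export DR_HOOK_PROFILE=myrun.prof" ends up as myrun.prof.<task> (see get_mon_out). */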
env = getenv("DR_HOOK_PROFILE");
if (env) {
char *s = calloc_drhook(strlen(env) + 15, sizeof(*s));
strcpy(s,env);
if (!strchr(env,'%')) strcat(s,".%d");
mon_out = strdup_drhook(s);
free_drhook(s);
}
if (mon_out) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_PROFILE=%s\n",pfx,TIMESTR(tid),FFL,mon_out);
env = getenv("DR_HOOK_PROFILE_PROC");
if (env) {
mon_out_procs = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_PROFILE_PROC=%d\n",pfx,TIMESTR(tid),FFL,mon_out_procs);
env = getenv("DR_HOOK_PROFILE_LIMIT");
if (env) {
percent_limit = atof(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_PROFILE_LIMIT=%.3f\n",pfx,TIMESTR(tid),FFL,percent_limit);
env = getenv("DR_HOOK_FUNCENTER");
if (env) {
opt_funcenter = atoi(env);
}
if (opt_funcenter) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_FUNCENTER=%d\n",pfx,TIMESTR(tid),FFL,opt_funcenter);
env = getenv("DR_HOOK_FUNCEXIT");
if (env) {
opt_funcexit = atoi(env);
}
if (opt_funcexit) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_FUNCEXIT=%d\n",pfx,TIMESTR(tid),FFL,opt_funcexit);
if (opt_funcenter || opt_funcexit) {
opt_gethwm = opt_getstk = 1;
}
env = getenv("DR_HOOK_TIMELINE");
if (env) {
opt_timeline = atoi(env);
}
if (opt_timeline) {
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMELINE=%d\n",pfx,TIMESTR(tid),FFL,opt_timeline);
env = getenv("DR_HOOK_TIMELINE_THREAD");
if (env) {
opt_timeline_thread = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMELINE_THREAD=%d\n",pfx,TIMESTR(tid),FFL,opt_timeline_thread);
env = getenv("DR_HOOK_TIMELINE_FORMAT");
if (env) {
opt_timeline_format = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMELINE_FORMAT=%d\n",pfx,TIMESTR(tid),FFL,opt_timeline_format);
env = getenv("DR_HOOK_TIMELINE_UNITNO");
if (env) {
opt_timeline_unitno = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMELINE_UNITNO=%d\n",pfx,TIMESTR(tid),FFL,opt_timeline_unitno);
env = getenv("DR_HOOK_TIMELINE_FREQ");
if (env) {
opt_timeline_freq = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMELINE_FREQ=%lld\n",pfx,TIMESTR(tid),FFL,opt_timeline_freq);
env = getenv("DR_HOOK_TIMELINE_MB");
if (env) {
opt_timeline_MB = atof(env);
if (opt_timeline_MB < 0) opt_timeline_MB = 1.0;
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMELINE_MB=%g\n",pfx,TIMESTR(tid),FFL,opt_timeline_MB);
}
if (myproc == 1) { /* Only applicable for master MPI task for now */
env = getenv("DR_HOOK_TRACE_STACK");
if (env) {
opt_trace_stack = atof(env);
if (opt_trace_stack < 0)
opt_trace_stack = 0;
else {
drhook_omp_stacksize = slave_stacksize();
if (drhook_omp_stacksize > 0) {
drhook_stacksize_threshold = opt_trace_stack * drhook_omp_stacksize;
opt_random_memstat = 1;
random_memstat(1,1);
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TRACE_STACK=%g\n",pfx,TIMESTR(tid),FFL,opt_trace_stack);
}
else
opt_trace_stack = 0;
}
}
}
if (!opt_random_memstat) {
env = getenv("DR_HOOK_RANDOM_MEMSTAT");
if (env) {
opt_random_memstat = atoi(env);
if (opt_random_memstat < 0) opt_random_memstat = 0;
if (opt_random_memstat > RAND_MAX) opt_random_memstat = RAND_MAX;
random_memstat(1,1);
}
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_RANDOM_MEMSTAT=%d (RAND_MAX=%d)\n",pfx,TIMESTR(tid),FFL,opt_random_memstat,RAND_MAX);
env = getenv("DR_HOOK_HASHBITS");
if (env) {
int value = atoi(env);
if (value < 1) value = 1;
else if (value > NHASHMAX) value = NHASHMAX;
nhash = value;
hashsize = HASHSIZE(nhash);
hashmask = HASHMASK(nhash);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_HASHBITS=%d\n",pfx,TIMESTR(tid),FFL,nhash);
env = getenv("DR_HOOK_NCALLSTACK");
if (env) {
int value = atoi(env);
if (value < 1) value = NCALLSTACK;
cstklen = value;
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_NCALLSTACK=%d\n",pfx,TIMESTR(tid),FFL,cstklen);
env = getenv("DR_HOOK_HARAKIRI_TIMEOUT");
if (env) {
int value = atoi(env);
if (value < 1) value = drhook_harakiri_timeout_default;
drhook_harakiri_timeout = value;
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_HARAKIRI_TIMEOUT=%d\n",pfx,TIMESTR(tid),FFL,drhook_harakiri_timeout);
env = getenv("DR_HOOK_USE_LOCKFILE");
if (env) {
int value = atoi(env);
drhook_use_lockfile = (value != 0) ? 1 : 0; /* currently accept just 0 or 1 */
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_USE_LOCKFILE=%d\n",pfx,TIMESTR(tid),FFL,drhook_use_lockfile);
env = getenv("DR_HOOK_TRAPFPE");
if (env) {
int value = atoi(env);
drhook_trapfpe = (value != 0) ? 1 : 0; /* currently accept just 0 or 1 */
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TRAPFPE=%d\n",pfx,TIMESTR(tid),FFL,drhook_trapfpe);
env = getenv("DR_HOOK_TRAPFPE_INVALID");
if (env) {
int value = atoi(env);
drhook_trapfpe_invalid = (value != 0) ? 1 : 0; /* currently accept just 0 or 1 */
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TRAPFPE_INVALID=%d\n",pfx,TIMESTR(tid),FFL,drhook_trapfpe_invalid);
env = getenv("DR_HOOK_TRAPFPE_DIVBYZERO");
if (env) {
int value = atoi(env);
drhook_trapfpe_divbyzero = (value != 0) ? 1 : 0; /* currently accept just 0 or 1 */
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TRAPFPE_DIVBYZERO=%d\n",pfx,TIMESTR(tid),FFL,drhook_trapfpe_divbyzero);
env = getenv("DR_HOOK_TRAPFPE_OVERFLOW");
if (env) {
int value = atoi(env);
drhook_trapfpe_overflow = (value != 0) ? 1 : 0; /* currently accept just 0 or 1 */
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TRAPFPE_OVERFLOW=%d\n",pfx,TIMESTR(tid),FFL,drhook_trapfpe_overflow);
env = getenv("DR_HOOK_TIMED_KILL");
if (env) {
drhook_timed_kill = strdup_drhook(env);
}
if (drhook_timed_kill) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_TIMED_KILL=%s\n",pfx,TIMESTR(tid),FFL,drhook_timed_kill);
env = getenv("DR_HOOK_DUMP_SMAPS");
if (env) {
ienv = atoi(env);
drhook_dump_smaps = (ienv != 0) ? 1 : 0;
}
if (drhook_dump_smaps) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_DUMP_SMAPS=%d\n",pfx,TIMESTR(tid),FFL,drhook_dump_smaps);
env = getenv("DR_HOOK_DUMP_MAPS");
if (env) {
ienv = atoi(env);
drhook_dump_maps = (ienv != 0) ? 1 : 0;
}
if (drhook_dump_maps) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_DUMP_MAPS=%d\n",pfx,TIMESTR(tid),FFL,drhook_dump_maps);
env = getenv("DR_HOOK_DUMP_BUDDYINFO");
if (env) {
ienv = atoi(env);
drhook_dump_buddyinfo = (ienv != 0) ? 1 : 0;
}
if (drhook_dump_buddyinfo) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_DUMP_BUDDYINFO=%d\n",pfx,TIMESTR(tid),FFL,drhook_dump_buddyinfo);
env = getenv("DR_HOOK_DUMP_MEMINFO");
if (env) {
ienv = atoi(env);
drhook_dump_meminfo = (ienv != 0) ? 1 : 0;
}
if (drhook_dump_meminfo) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_DUMP_MEMINFO=%d\n",pfx,TIMESTR(tid),FFL,drhook_dump_meminfo);
env = getenv("DR_HOOK_DUMP_HUGEPAGES");
if (env) {
double freq;
int nel = sscanf(env,"%d,%lf",&ienv,&freq);
if (nel == 2) {
drhook_dump_hugepages = (freq > 0 && (ienv == -1 || ienv == myproc)) ? ienv : 0;
if (drhook_dump_hugepages) drhook_dump_hugepages_freq = freq;
}
}
if (drhook_dump_hugepages) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_DUMP_HUGEPAGES=%d,%.6f\n",pfx,TIMESTR(tid),FFL,
drhook_dump_hugepages,drhook_dump_hugepages_freq);
env = getenv("DR_HOOK_GENCORE");
if (env) {
opt_gencore = atoi(env);
}
if (opt_gencore) {
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_GENCORE=%d\n",pfx,TIMESTR(tid),FFL,opt_gencore);
env = getenv("DR_HOOK_GENCORE_SIGNAL");
if (env) {
int itmp = atoi(env);
if (itmp >= 1 && itmp <= NSIG && itmp != SIGABRT) {
opt_gencore_signal = itmp;
}
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_GENCORE_SIGNAL=%d\n",pfx,TIMESTR(tid),FFL,opt_gencore_signal);
}
env = getenv("DR_HOOK_HPMSTOP");
if (env) {
char *s = strdup_drhook(env);
long long int a;
double b;
int n = 0;
env = s;
while (*env) {
if (isspace(*env) || *env == ',') *env = ' ';
env++;
}
n = sscanf(s,"%lld %lf",&a,&b);
if (n >= 1) opt_hpmstop_threshold = a;
if (n >= 2) opt_hpmstop_mflops = b;
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_HPMSTOP=%lld,%.15g\n",
pfx,TIMESTR(tid),FFL,opt_hpmstop_threshold,opt_hpmstop_mflops);
free_drhook(s);
}
newline = 0;
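  /* DR_HOOK_OPT is a case-insensitive keyword list separated by commas, blanks, tabs or '/';
     e.g. a hypothetical "export DR_HOOK_OPT=wallprof,calls" enables wall-clock profiling with
     call counts. Keywords not recognised below are silently ignored. */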
env = getenv("DR_HOOK_OPT");
if (env) {
const char delim[] = ", \t/";
char *comma = " DR_HOOK_OPT=\"";
char *s = strdup_drhook(env);
char *p = s;
while (*p) {
if (islower(*p)) *p = toupper(*p);
p++;
}
p = strtok(s,delim);
/* if (p) OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_OPT=\"",pfx,TIMESTR(tid)); */
if (p && fp) {
fprintf(fp,"%s %s [%s@%s:%d]",pfx,TIMESTR(tid),FFL);
newline = 1;
}
while (p) {
/* Assume that everything is OFF by default */
if (strequ(p,"ALL")) { /* all except profiler data */
opt_gethwm = opt_getstk = opt_getrss = opt_getpag = opt_walltime = opt_cputime = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"ALL"); comma = ",";
}
else if (strequ(p,"MEM") || strequ(p,"MEMORY")) {
opt_gethwm = opt_getstk = opt_getrss = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"MEMORY"); comma = ",";
}
else if (strequ(p,"TIME") || strequ(p,"TIMES")) {
opt_walltime = opt_cputime = 1;
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"TIMES"); comma = ",";
}
else if (strequ(p,"HWM") || strequ(p,"HEAP")) {
opt_gethwm = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"HEAP"); comma = ",";
}
else if (strequ(p,"STK") || strequ(p,"STACK")) {
opt_getstk = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"STACK"); comma = ",";
}
else if (strequ(p,"RSS")) {
opt_getrss = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"RSS"); comma = ",";
}
else if (strequ(p,"PAG") || strequ(p,"PAGING")) {
opt_getpag = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"PAGING"); comma = ",";
}
else if (strequ(p,"WALL") || strequ(p,"WALLTIME")) {
opt_walltime = 1;
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"WALLTIME"); comma = ",";
}
else if (strequ(p,"CPU") || strequ(p,"CPUTIME")) {
opt_cputime = 1;
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"CPUTIME"); comma = ",";
}
else if (strequ(p,"CALLS") || strequ(p,"COUNT")) {
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"CALLS"); comma = ",";
}
else if (strequ(p,"MEMPROF")) {
opt_memprof = 1;
drhook_memtrace = 1;
opt_gethwm = opt_getstk = opt_getrss = 1;
opt_getpag = 1;
opt_calls = 1;
any_memstat++;
OPTPRINT(fp,"%s%s",comma,"MEMPROF"); comma = ",";
}
else if (strequ(p,"PROF") || strequ(p,"WALLPROF")) {
opt_wallprof = 1;
opt_walltime = 1;
opt_cpuprof = 0; /* Note: Switches cpuprof OFF */
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"WALLPROF"); comma = ",";
}
else if (strequ(p,"CPUPROF")) {
opt_cpuprof = 1;
opt_cputime = 1;
opt_wallprof = 0; /* Note: Switches wallprof OFF */
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"CPUPROF"); comma = ",";
}
else if (strequ(p,"HPM") || strequ(p,"HPMPROF") || strequ(p,"MFLOPS")) {
opt_hpmprof = 1;
opt_wallprof = 1; /* Note: Implies wallprof (or prof), not cpuprof */
opt_walltime = 1;
opt_cpuprof = 0; /* Note: Switches cpuprof OFF */
opt_calls = 1;
OPTPRINT(fp,"%s%s",comma,"HPMPROF"); comma = ",";
}
else if (strequ(p,"TRIM")) {
opt_trim = 1;
OPTPRINT(fp,"%s%s",comma,"TRIM"); comma = ",";
}
else if (strequ(p,"SELF")) {
opt_self = 2;
OPTPRINT(fp,"%s%s",comma,"SELF"); comma = ",";
}
else if (strequ(p,"NOSELF")) {
opt_self = 0;
OPTPRINT(fp,"%s%s",comma,"NOSELF"); comma = ",";
}
else if (strequ(p,"NOPROP") || strequ(p,"NOPROPAGATE") ||
strequ(p,"NOPROPAGATE_SIGNALS")) {
opt_propagate_signals = 0;
OPTPRINT(fp,"%s%s",comma,"NOPROPAGATE_SIGNALS"); comma = ",";
}
else if (strequ(p,"NOSIZE") || strequ(p,"NOSIZEINFO")) {
opt_sizeinfo = 0;
OPTPRINT(fp,"%s%s",comma,"NOSIZEINFO"); comma = ",";
}
else if (strequ(p,"CLUSTER") || strequ(p,"CLUSTERINFO")) {
opt_clusterinfo = 1;
OPTPRINT(fp,"%s%s",comma,"CLUSTERINFO"); comma = ",";
}
else if (strequ(p,"CALLPATH")) {
opt_callpath = 1;
OPTPRINT(fp,"%s%s",comma,"CALLPATH"); comma = ",";
}
p = strtok(NULL,delim);
}
free_drhook(s);
if (*comma == ',') {
OPTPRINT(fp,"\"\n");
newline = 0;
}
if (newline) OPTPRINT(fp,"\n");
if (opt_callpath) {
env = getenv("DR_HOOK_CALLPATH_INDENT");
if (env) {
callpath_indent = atoi(env);
if (callpath_indent < 1 || callpath_indent > 8) callpath_indent = callpath_indent_default;
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_CALLPATH_INDENT=%d\n",pfx,TIMESTR(tid),FFL,callpath_indent);
env = getenv("DR_HOOK_CALLPATH_DEPTH");
if (env) {
callpath_depth = atoi(env);
if (callpath_depth < 0) callpath_depth = callpath_depth_default;
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_CALLPATH_DEPTH=%d\n",pfx,TIMESTR(tid),FFL,callpath_depth);
env = getenv("DR_HOOK_CALLPATH_PACKED");
if (env) {
callpath_packed = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_CALLPATH_PACKED=%d\n",pfx,TIMESTR(tid),FFL,callpath_packed);
env = getenv("DR_HOOK_CALLTRACE");
if (env) {
opt_calltrace = atoi(env);
}
OPTPRINT(fp,"%s %s [%s@%s:%d] DR_HOOK_CALLTRACE=%d\n",pfx,TIMESTR(tid),FFL,opt_calltrace);
}
if (opt_wallprof || opt_cpuprof || opt_memprof || opt_timeline) {
atexit(do_prof);
}
}
else {
if (opt_timeline) atexit(do_prof);
} /* if (env) */
processed = 1;
}
/*--- trim ---*/
static const char *
trim(const char *name, int *n)
{
const char *from;
int len;
int name_len = *n;
while (*name && isspace(*name) && name_len > 0) {
/* skip leading blanks */
name++;
name_len--;
}
len = 0;
from = name;
while (*from && !isspace(*from) && name_len > 0) {
/* find first space point, if any */
from++;
len++;
name_len--;
}
*n = len;
if (!name) {
/* Never actually called (unless a true fatality) */
ABOR1("***Fatal error in drhook.c:trim()-function");
}
return name;
}
/*--- insertkey ---*/
static drhook_key_t *
insertkey(int tid, const drhook_key_t *keyptr_in)
{
drhook_key_t *keyptr = NULL;
if (tid >= 1 && tid <= numthreads) {
/* no trimming available for this; just raw eval & insert */
unsigned int hash = hashfunc(keyptr_in->name, keyptr_in->name_len);
keyptr = &keydata[tid-1][hash];
for (;;) {
if (!keyptr->name) { /* A free slot */
memcpy(keyptr,keyptr_in,sizeof(*keyptr));
keyptr->next = NULL;
break;
}
else {
if (!keyptr->next) {
keyptr->next = calloc_drhook(1, sizeof(drhook_key_t)); /* chaining */
}
keyptr = keyptr->next;
} /* if (!keyptr->name) ... else ... */
} /* for (;;) */
} /* if (tid >= 1 && tid <= numthreads) */
return keyptr;
}
/*--- getkey ---*/
static drhook_key_t *
getkey(int tid, const char *name, int name_len,
const char *filename, int filename_len,
const double *walltime, const double *cputime,
const equivalence_t *callpath, int callpath_len,
int *free_callpath)
{
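  /* Look up -- or create on first use -- the hash table entry for routine 'name' on thread
     'tid'; colliding names are chained via keyptr->next. On a hit the entry's enter-time and
     memory counters are updated and the key is pushed onto the per-thread call tree. */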
drhook_key_t *keyptr = NULL;
if (tid >= 1 && tid <= numthreads) {
unsigned int hash, fullhash;
if (opt_trim) name = trim(name, &name_len);
hash = hashfunc(name, name_len);
if (callpath) {
callpath_hashfunc(hash, callpath, callpath_len, &fullhash);
#ifdef DEBUG
fprintf(stderr,
"getkey: name='%.*s', name_len=%d, callpath_len=%d, fullhash=%u\n",
name_len, name, name_len, callpath_len, fullhash);
#endif
}
keyptr = &keydata[tid-1][hash];
for (;;) {
int found = 0;
if (!keyptr->name) { /* A free slot */
keyptr->name = malloc_drhook((name_len+1)*sizeof(*name));
keyptr->name_len = name_len;
if (opt_trim) {
const char *from = name;
char *to = keyptr->name;
int len = name_len;
for (; len>0; from++, len--) {
*to++ = islower(*from) ? toupper(*from) : *from;
}
*to = 0;
}
else {
memcpy(keyptr->name, name, name_len);
keyptr->name[name_len] = 0;
}
if (filename_len > 0 &&
filename &&
*filename) {
char *psave = NULL;
char *p = psave = malloc_drhook((filename_len+1)*sizeof(*filename));
memcpy(p, filename, filename_len);
p[filename_len] = 0;
{ /* Strip out dirname */
char *s = strrchr(p,'/');
if (s) p = s+1;
}
keyptr->filename = strdup_drhook(p);
free_drhook(psave);
}
if (callpath) {
if (free_callpath) *free_callpath = 0;
keyptr->callpath = callpath;
keyptr->callpath_len = callpath_len;
keyptr->callpath_fullhash = fullhash;
}
found = 1;
}
if (found ||
(keyptr->name_len == name_len &&
(!callpath || (callpath && keyptr->callpath &&
keyptr->callpath_len == callpath_len &&
keyptr->callpath_fullhash == fullhash)) &&
((!opt_trim && *keyptr->name == *name && strnequ(keyptr->name, name, name_len)) ||
(opt_trim && strncasecmp(keyptr->name, name, name_len) == 0)))) {
if (opt_walltime) keyptr->wall_in = walltime ? *walltime : WALLTIME();
if (opt_cputime) keyptr->cpu_in = cputime ? *cputime : CPUTIME();
if (any_memstat) memstat(keyptr,&tid,1);
if (opt_calls) {
keyptr->calls++;
keyptr->status++;
}
insert_calltree(tid, keyptr);
break; /* for (;;) */
}
else {
if (!keyptr->next) {
keyptr->next = calloc_drhook(1, sizeof(drhook_key_t)); /* chaining */
}
keyptr = keyptr->next;
} /* if (found ...) else ... */
} /* for (;;) */
curkeyptr[tid-1] = keyptr;
} /* if (tid >= 1 && tid <= numthreads) */
return keyptr;
}
/*--- putkey ---*/
static void
putkey(int tid, drhook_key_t *keyptr, const char *name, int name_len,
int sizeinfo,
double *walltime, double *cputime)
{
const int sig = SIGABRT;
const char sl_name[] = "SIGABRT";
drhook_calltree_t *treeptr = (tid >= 1 && tid <= numthreads) ? thiscall[tid-1] : NULL;
if (!treeptr || !treeptr->active || treeptr->keyptr != keyptr) {
char *pfx = PREFIX(tid);
char *s;
unsigned int hash;
if (opt_trim) name = trim(name, &name_len);
hash = hashfunc(name, name_len);
s = strdup2_drhook(name,name_len);
if (opt_trim) {
char *p = s;
while (*p) {
if (islower(*p)) *p = toupper(*p);
p++;
}
}
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: Dr.Hook has detected an invalid"
" key-pointer/handle while leaving the routine '%s' [hash=%u]\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name,s,hash);
if (treeptr) {
equivalence_t u;
u.keyptr = treeptr->keyptr;
hash = (u.keyptr && u.keyptr->name) ? hashfunc(u.keyptr->name,u.keyptr->name_len) : 0;
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: Expecting the key-pointer=%p"
" and treeptr->active-flag = 1\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name,u.keyptr);
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: A probable routine missing the closing"
" DR_HOOK-call is '%s' [hash=%u]\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name,
(u.keyptr && u.keyptr->name) ? u.keyptr->name : NIL, hash);
u.keyptr = keyptr;
hash = (u.keyptr && u.keyptr->name) ? hashfunc(u.keyptr->name,u.keyptr->name_len) : 0;
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: Got a key-pointer=%p"
" and treeptr->active-flag = %d\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name,u.keyptr,treeptr->active);
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: This key-pointer maybe associated with"
" the routine '%s' [hash=%u]\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name,
(u.keyptr && u.keyptr->name) ? u.keyptr->name : NIL, hash);
u.keyptr = curkeyptr[tid-1];
hash = (u.keyptr && u.keyptr->name) ? hashfunc(u.keyptr->name,u.keyptr->name_len) : 0;
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: The current key-pointer (=%p) thinks"
" it maybe associated with the routine '%s' [hash=%u]\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name,
u.keyptr,
(u.keyptr && u.keyptr->name) ? u.keyptr->name : NIL, hash);
}
free_drhook(s);
fprintf(stderr,
"%s %s [%s@%s:%d] [signal#%d(%s)]: Aborting...\n",
pfx,TIMESTR(tid),FFL,
sig,sl_name);
RAISE(SIGABRT);
}
else if (tid >= 1 && tid <= numthreads) {
double delta_wall = 0;
double delta_cpu = 0;
if (any_memstat) memstat(keyptr,&tid,0);
if (opt_calls) keyptr->status--;
if (opt_sizeinfo && sizeinfo > 0) {
if (keyptr->sizeinfo == 0) { /* First time */
keyptr->min_sizeinfo = sizeinfo;
keyptr->max_sizeinfo = sizeinfo;
}
else {
keyptr->min_sizeinfo = MIN(keyptr->min_sizeinfo, sizeinfo);
keyptr->max_sizeinfo = MAX(keyptr->max_sizeinfo, sizeinfo);
}
keyptr->sizeinfo += sizeinfo;
}
if (opt_cputime && cputime) {
*cputime = CPUTIME();
delta_cpu = *cputime - keyptr->cpu_in;
}
if (opt_walltime && walltime) {
*walltime = WALLTIME();
delta_wall = *walltime - keyptr->wall_in;
}
if (opt_walltime) keyptr->delta_wall_all += delta_wall;
if (opt_cputime) keyptr->delta_cpu_all += delta_cpu;
remove_calltree(tid, keyptr, &delta_wall, &delta_cpu);
}
}
/*--- init_drhook ---*/
static void
init_drhook(int ntids)
{
if (numthreads == 0 || !keydata || !calltree || !keyself || !overhead || !curkeyptr || !cstk) {
int j;
if (pid == -1) { /* Ensure that just called once */
{
/* Invoke once : timers, memory counters etc. to "wake them up" */
(void) WALLTIME();
(void) CPUTIME();
(void) gethwm_();
(void) getmaxhwm_();
(void) getrss_();
(void) getmaxrss_();
(void) getstk_();
(void) getmaxstk_();
(void) getpag_();
}
#ifdef RS6K
irtc_start = irtc();
#endif
#ifdef CRAYXT
dclock_start = dclock();
#endif
#if defined(SV2) || defined(XD1) || defined(XT3)
#if defined(SV2)
irtc_start = _rtc();
#else
irtc_start = irtc_();
#endif
my_irtc_rate = irtc_rate_();
my_inv_irtc_rate = 1.0/my_irtc_rate;
#endif
start_stamp = timestamp();
{
char *env = getenv("DR_HOOK_SHOW_LOCK"); /* export DR_HOOK_SHOW_LOCK=1 to show the lock-info */
int konoff = env ? atoi(env) : 0;
int kret = 0;
if (konoff == 1) coml_set_debug_(&konoff, &kret);
INIT_LOCKID_WITH_NAME(&DRHOOK_lock,"drhook.c:DRHOOK_lock");
if (kret != 0) {
konoff = 0;
coml_set_debug_(&konoff, &kret);
}
}
#if defined(NECSX)
{ /* If C programs are compiled with -traceback, then the NEC/F90
MESPUT call will also include C routines in the traceback, provided
that in addition 'export C_TRACEBACK=YES' has been set */
char *env = getenv("C_TRACEBACK");
if (!env) {
/* Override only if C_TRACEBACK hadn't already been defined */
static char s[] = "C_TRACEBACK=YES"; /* note: must be static */
putenv(s);
}
}
#endif
ec_set_umask_();
pid = getpid();
signal_drhook_init(1); /* myproc gets set .. if not earlier */
process_options();
set_timed_kill();
drhook_lhook = 1;
}
if (!keydata) {
keydata = malloc_drhook(sizeof(**keydata) * ntids);
for (j=0; j<ntids; j++) {
keydata[j] = calloc_drhook(hashsize, sizeof(drhook_key_t));
}
}
if (!cstk) {
cstk = calloc_drhook(ntids, sizeof(**cstk));
}
if (!calltree) {
calltree = malloc_drhook(sizeof(**calltree) * ntids);
thiscall = malloc_drhook(sizeof(**thiscall) * ntids);
for (j=0; j<ntids; j++) {
thiscall[j] = calltree[j] = calloc_drhook(1,sizeof(drhook_calltree_t));
}
}
if (!keyself && opt_self && (opt_wallprof || opt_cpuprof || opt_hpmprof)) {
const char *name = "$drhook";
int name_len = strlen(name);
keyself = malloc_drhook(sizeof(**keyself) * ntids);
for (j=0; j<ntids; j++) {
drhook_key_t *keyptr = keyself[j] = calloc_drhook(1,sizeof(drhook_key_t));
keyptr->name = strdup_drhook(name);
keyptr->name_len = name_len;
}
}
if (!overhead) {
overhead = calloc_drhook(ntids,sizeof(*overhead));
}
if (!curkeyptr) {
curkeyptr = malloc_drhook(sizeof(**curkeyptr) * ntids);
for (j=0; j<ntids; j++) {
curkeyptr[j] = NULL;
}
}
numthreads = ntids;
if (!timeline) {
if (opt_timeline_unitno >= 0 && opt_timeline_freq >= 1 &&
(opt_timeline == myproc || opt_timeline == -1)) {
timeline = calloc_drhook(ntids, sizeof(*timeline));
}
if (timeline) drhook_memtrace = 1;
if (timeline) {
/* The first timeline-call */
const int ftnunitno = opt_timeline_unitno;
const int master = 1;
const int print_option = +7;
int initlev = 0;
c_drhook_print_(&ftnunitno, &master, &print_option, &initlev);
}
}
init_hpm(1); /* First thread */
}
}
/*-- overhead-macro --*/
#define OVERHEAD(tid,walltime_in,cputime_in,delta,calc_delta) \
if (overhead && tid >= 1 && tid <= numthreads) { \
if (calc_delta) { \
if (opt_walltime) delta = WALLTIME() - walltime_in; \
else if (opt_cputime) delta = CPUTIME() - cputime_in; \
else delta = 0; \
} \
overhead[tid-1] += delta; \
}
/*--- itself ---*/
#define ITSELF_0 \
double delta = 0; \
drhook_key_t *keyptr_self = keyself ? itself(NULL,*thread_id,0,NULL,&walltime,&cputime) : NULL;
#define ITSELF_1 \
if (keyptr_self) { \
(void) itself(keyptr_self,*thread_id,1,&delta,&walltime,&cputime); \
if (opt_wallprof) u.keyptr->delta_wall_child += delta; \
else u.keyptr->delta_cpu_child += delta; \
OVERHEAD(*thread_id,walltime,cputime,delta,0); \
} \
else { \
OVERHEAD(*thread_id,walltime,cputime,delta,1); \
}
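/* ITSELF_0/ITSELF_1 bracket the bodies of c_drhook_start_/c_drhook_end_: ITSELF_0 starts
   timing the "$drhook" pseudo-routine for this thread and ITSELF_1 stops it, charging the
   elapsed time to the instrumented routine's child-time and to the per-thread overhead[]
   accumulator. */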
static drhook_key_t *
itself(drhook_key_t *keyptr_self,
int tid, int opt, double *delta_time,
const double *walltime, const double *cputime)
{
drhook_key_t *keyptr = NULL;
if (keyself) {
keyptr = keyptr_self ? keyptr_self : keyself[tid-1];
if (opt == 0) {
if (opt_wallprof) keyptr->wall_in = walltime ? *walltime : WALLTIME();
else keyptr->cpu_in = cputime ? *cputime : CPUTIME();
keyptr->calls++;
}
else if (opt == 1) {
double delta = 0;
if (opt_wallprof) {
delta = walltime ? (*walltime - keyptr->wall_in) : (WALLTIME() - keyptr->wall_in);
keyptr->delta_wall_all += delta;
}
else {
delta = cputime ? (*cputime - keyptr->cpu_in) : (CPUTIME() - keyptr->cpu_in);
keyptr->delta_cpu_all += delta;
}
if (delta_time) *delta_time = delta;
}
}
return keyptr;
}
/*--- commie-routines : insert a "," i.e. a comma after every 3 digits, e.g.:
1234567890 becomes the more readable 1,234,567,890 */
static void
lld_commie(long long int n, char sd[])
{
const char comma = ',';
char s[DRHOOK_STRBUF];
char *p;
int len, ncommas;
sprintf(s,"%lld",n);
len = strlen(s);
ncommas = (len-1)/3;
if (ncommas > 0) {
char *pd = sd + len + ncommas;
*pd-- = 0;
p = s + len - 1;
len = 0;
while (p-s >= 0) {
*pd-- = *p--;
len++;
if (p-s >= 0 && len%3 == 0) *pd-- = comma;
}
}
else {
strcpy(sd,s);
}
}
static void
dbl_commie(double n, char sd[])
{
const char comma = ',';
char s[DRHOOK_STRBUF];
char *p;
int len, ncommas;
sprintf(s,"%.0f",n);
len = strlen(s);
ncommas = (len-1)/3;
if (ncommas > 0) {
char *pd = sd + len + ncommas;
*pd-- = 0;
p = s + len - 1;
len = 0;
while (p-s >= 0) {
*pd-- = *p--;
len++;
if (p-s >= 0 && len%3 == 0) *pd-- = comma;
}
}
else {
strcpy(sd,s);
}
}
/*--- callpath as a "pathname" ---*/
static void
unroll_callpath(FILE *fp, int len,
const equivalence_t *callpath, int callpath_len)
{
if (fp && callpath && callpath_len > 0) {
int j;
for (j=0; j<callpath_len; callpath++, j++) {
if (callpath && callpath->keyptr && callpath->keyptr->name) {
const char *name = callpath->keyptr->name;
int name_len = callpath->keyptr->name_len;
len -= callpath_indent;
if (len < 0) len = 0;
fprintf(fp,"\n%*s%.*s",len," ",name_len,name);
}
#ifdef DEBUG
else {
fprintf(fp,
"\n????callpath=%p, callpath->keyptr=%p, callpath->keyptr->name='%s'",
callpath, callpath ? callpath->keyptr : 0,
(callpath && callpath->keyptr && callpath->keyptr->name) ?
callpath->keyptr->name : NIL);
}
#endif
}
} /* if (fp) */
}
static equivalence_t *
get_callpath(int tid, int *callpath_len)
{
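  /* Build a snapshot of the current call path of thread 'tid' by walking the active call
     tree backwards (newest entry first) up to callpath_depth levels; the caller frees the
     returned array unless getkey() takes ownership of it. */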
int depth = 0;
equivalence_t *callpath = NULL;
if (tid >= 1 && tid <= numthreads) {
const drhook_calltree_t *treeptr = thiscall[tid-1];
while (treeptr && treeptr->active && depth < callpath_depth) {
depth++;
treeptr = treeptr->prev;
}
if (depth > 0) {
int j = 0;
callpath = malloc_drhook(sizeof(*callpath) * depth);
treeptr = thiscall[tid-1];
while (treeptr && treeptr->active && j < callpath_depth) {
callpath[j].keyptr = treeptr->keyptr;
j++;
treeptr = treeptr->prev;
}
} /* if (depth > 0) */
} /* if (tid >= 1 && tid <= numthreads) */
if (callpath_len) *callpath_len = depth;
return callpath;
}
/*--- profiler output ---*/
static int do_prof_off = 0;
static void
do_prof()
{
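  /* Registered with atexit() from process_options(): emits the wall/CPU profile
     (print_option 3), the memory profile (4) and the final timeline record (-7),
     unless profiling output has been switched off or a signal handler is already running. */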
/* to avoid recursive signals while atexit() (e.g. SIGXCPU) */
if (signal_handler_ignore_atexit) return;
if (!do_prof_off && (opt_wallprof || opt_cpuprof)) {
/* CPU, wall-clock and/or MFlop/s profiling */
const int ftnunitno = 0;
const int master = 1;
const int print_option = 3;
int initlev = 0;
c_drhook_print_(&ftnunitno, &master, &print_option, &initlev);
}
if (!do_prof_off && opt_memprof) {
/* Memory profiling */
const int ftnunitno = 0;
const int master = 1;
const int print_option = 4;
int initlev = 0;
c_drhook_print_(&ftnunitno, &master, &print_option, &initlev);
}
if (!do_prof_off && timeline) {
/* The last timeline-call */
const int ftnunitno = opt_timeline_unitno;
const int master = 1;
const int print_option = -7;
int initlev = 0;
c_drhook_print_(&ftnunitno, &master, &print_option, &initlev);
}
}
void c_drhook_prof_()
{
if (ec_drhook) {
do_prof();
do_prof_off = 1;
}
}
/*--- Check watch points ---*/
// Forward declarations of subroutines defined in dr_hook_prt.F90
void dr_hook_prt_logical_( const int* kunit, const void* ptr, const int* n );
void dr_hook_prt_char_( const int* kunit, const void* ptr, const int* n );
void dr_hook_prt_i4_( const int* kunit, const void* ptr, const int* n );
void dr_hook_prt_i8_( const int* kunit, const void* ptr, const int* n );
void dr_hook_prt_r4_( const int* kunit, const void* ptr, const int* n );
void dr_hook_prt_r8_( const int* kunit, const void* ptr, const int* n );
typedef enum { /* See dr_hook_watch_mod.F90 */
KEYNONE = 0,
KEYLOG = 1,
KEYCHAR = 2,
KEY_I4 = 4,
KEY_I8 = 8,
KEY_R4 = 16,
KEY_R8 = 32
} PrintWatchKeys_t;
static void print_watch(int ftnunitno, int key, const void *ptr, int n)
{
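  /* Dump the contents of a watched array through the Fortran helpers in dr_hook_prt.F90,
     choosing the printer that matches the watch point's data-type key. */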
if (ptr && key > KEYNONE && n > 0) {
int nmax = n;
if (key == KEYLOG) {
dr_hook_prt_logical_(&ftnunitno, ptr, &nmax);
}
else if (key == KEYCHAR) {
dr_hook_prt_char_(&ftnunitno, ptr, &nmax);
}
else if (key == KEY_I4) {
dr_hook_prt_i4_(&ftnunitno, ptr, &nmax);
}
else if (key == KEY_I8) {
dr_hook_prt_i8_(&ftnunitno, ptr, &nmax);
}
else if (key == KEY_R4) {
dr_hook_prt_r4_(&ftnunitno, ptr, &nmax);
}
else if (key == KEY_R8) {
dr_hook_prt_r8_(&ftnunitno, ptr, &nmax);
}
}
}
static void
check_watch(const char *label,
const char *name,
int name_len,
int allow_abort)
{
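  /* Scan all active watch points under the DrHook lock: first compare the bytes saved when
     the watch was set, then the CRC32 of the whole area; when a change is detected the new
     contents and a traceback are printed and, if requested, the program aborts. */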
if (watch) {
int print_traceback = 1;
drhook_watch_t *p = watch;
coml_set_lockid_(&DRHOOK_lock);
while (p) {
if (p->active) {
unsigned int crc32 = 0;
int calc_crc = 0;
const char *first_nbytes = p->first_nbytes; /* the copy saved when the watch point was set */
int changed = memcmp(first_nbytes,p->ptr,p->watch_first_nbytes);
if (!changed) {
/* The first nbytes were still the same; checking if crc has changed ... */
crc32_(p->ptr, &p->nbytes, &crc32);
changed = (crc32 != p->crc32);
calc_crc = 1;
}
if (changed) {
int tid = get_thread_id_();
char *pfx = PREFIX(tid);
if (!calc_crc) crc32_(p->ptr, &p->nbytes, &crc32);
fprintf(stderr,
"%s %s [%s@%s:%d] ***%s: Changed watch point '%s' at %p (%d bytes [#%d values])"
" -- %s %.*s : new crc32=%u\n",
pfx,TIMESTR(tid),FFL,
p->abort_if_changed ? "Error" : "Warning",
p->name, p->ptr, p->nbytes, p->nvals,
label, name_len, name, crc32);
print_watch(0, p->printkey, p->ptr, p->nvals);
if (print_traceback) {
LinuxTraceBack(pfx,TIMESTR(tid),NULL);
print_traceback = 0;
}
if (allow_abort && p->abort_if_changed) {
coml_unset_lockid_(&DRHOOK_lock); /* An important unlocking on Linux; otherwise hangs (until time-out) */
RAISE(SIGABRT);
}
#if 0
p->active = 0; /* No more these messages for this array */
watch_count--;
#else
p->crc32 = crc32;
#endif
}
}
p = p->next;
} /* while (p) */
coml_unset_lockid_(&DRHOOK_lock);
}
}
void
c_drhook_check_watch_(const char *where,
const int *allow_abort
/* Hidden length */
, int where_len)
{
if (watch && watch_count > 0) check_watch("whilst at", where, where_len, *allow_abort);
}
/*** PUBLIC ***/
#define TIMERS \
double walltime = opt_walltime ? WALLTIME() : 0; \
double cputime = opt_cputime ? CPUTIME() : 0; \
long long int hwm = opt_gethwm ? gethwm_() : 0; \
long long int stk = opt_getstk ? getstk_() : 0
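/* TIMERS snapshots wall-clock time, CPU time, heap high-water mark and stack usage on
   entry to the instrumented calls below; each value is gathered only if the corresponding
   opt_* flag is active. */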
/*=== c_drhook_set_lhook_ ===*/
void
c_drhook_set_lhook_(const int *lhook)
{
if (lhook) drhook_lhook = *lhook;
}
/*=== c_drhook_getenv_ ===*/
void
c_drhook_getenv_(const char *s,
char *value,
/* Hidden arguments */
int slen,
const int valuelen)
{
char *env = NULL;
char *p = malloc_drhook(slen+1);
if (!p) {
fprintf(stderr,"c_drhook_getenv_(): Unable to allocate %d bytes of memory\n", slen+1);
RAISE(SIGABRT);
}
memcpy(p,s,slen);
p[slen]='\0';
memset(value, ' ', valuelen);
env = getenv(p);
if (env) {
int len = strlen(env);
if (valuelen < len) len = valuelen;
memcpy(value,env,len);
}
free_drhook(p);
}
/*=== c_drhook_init_ ===*/
void
c_drhook_init_(const char *progname,
const int *num_threads
/* Hidden length */
,int progname_len)
{
init_drhook(*num_threads);
max_threads = MAX(1,*num_threads);
if (a_out) free_drhook(a_out);
progname = trim(progname, &progname_len);
if (progname_len > 0) {
a_out = calloc_drhook(progname_len+1,sizeof(*progname));
memcpy(a_out, progname, progname_len);
}
else {
/* progname is a blank string;
this is most likely due to a Fortran call to getarg
from a program that has a C main program, in which case Fortran getarg
may return a blank string */
const char *arg0 = ec_GetArgs(0);
if (arg0) {
const char *pc = arg0;
progname_len = strlen(pc);
pc = trim(pc, &progname_len);
a_out = strdup_drhook(pc);
}
}
if (!a_out) {
a_out = strdup_drhook("a.out"); /* Failed to obtain the name of the executing program */
}
}
/*=== c_drhook_watch_ ===*/
void
c_drhook_watch_(const int *onoff,
const char *array_name,
const void *array_ptr,
const int *nbytes,
const int *abort_if_changed,
const int *printkey,
const int *nvals,
const int *print_traceback_when_set
/* Hidden length */
,int array_name_len)
{
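  /* Register (or re-register) a watch point covering *nbytes bytes at array_ptr: a copy of
     the first few bytes plus a CRC32 of the whole area are stored so that later
     check_watch() calls can detect modification (and optionally abort on a change). */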
int tid = get_thread_id_();
drhook_watch_t *p = NULL;
if (!drhook_lhook) return;
coml_set_lockid_(&DRHOOK_lock);
/* check whether this array_ptr is already registered, but maybe inactive */
p = watch;
while (p) {
if (p->ptr == array_ptr) {
if (p->active) watch_count--;
free_drhook(p->name);
break;
}
p = p->next;
}
if (!p) {
/* create new branch */
p = calloc_drhook(1, sizeof(*p)); /* Implies p->next = NULL */
if (!last_watch) {
last_watch = watch = p;
}
else {
last_watch->next = p;
last_watch = p;
}
}
p->name = strdup2_drhook(array_name,array_name_len);
p->tid = tid;
p->active = *onoff;
if (p->active) watch_count++;
p->abort_if_changed = *abort_if_changed;
p->ptr = array_ptr;
p->nbytes = *nbytes;
p->watch_first_nbytes = MIN(p->nbytes, MAX_WATCH_FIRST_NBYTES);
memcpy(p->first_nbytes,p->ptr,p->watch_first_nbytes);
p->crc32 = 0;
crc32_(p->ptr, &p->nbytes, &p->crc32);
p->printkey = *printkey;
p->nvals = *nvals;
{
char *pfx = PREFIX(p->tid);
int ftnunitno = 0;
int textlen = strlen(pfx) + strlen(p->name) + 256;
char *text = malloc_drhook(textlen * sizeof(*text));
snprintf(text,textlen,
"%s ***Warning: Set watch point '%s' at %p (%d bytes [%d values]) : crc32=%u",
pfx, p->name, p->ptr, p->nbytes, p->nvals, p->crc32);
dr_hook_prt_(&ftnunitno, text, strlen(text));
print_watch(ftnunitno, p->printkey, p->ptr, p->nvals);
free_drhook(text);
if (*print_traceback_when_set) LinuxTraceBack(pfx,TIMESTR(p->tid),NULL);
}
coml_unset_lockid_(&DRHOOK_lock);
}
/*=== c_drhook_start_ ===*/
void
c_drhook_start_(const char *name,
const int *thread_id,
double *key,
const char *filename,
const int *sizeinfo
/* Hidden length */
,int name_len, int filename_len)
{
TIMERS;
equivalence_t u;
ITSELF_0;
if (!signals_set) signal_drhook_init(1);
if (name_len > 0 && opt_funcenter == *thread_id) {
fprintf(stdout,"<e> %d %d %.*s %lld %lld\n",myproc,*thread_id,name_len,name,hwm,stk);
fflush(stdout);
}
if (watch && watch_count > 0) check_watch("when entering routine", name, name_len, 1);
if (drhook_dump_hugepages) {
int tid = *thread_id;
char *pfx = PREFIX(tid);
dump_hugepages(0,pfx,tid,0,-1);
}
if (!opt_callpath) {
u.keyptr = getkey(*thread_id, name, name_len,
filename, filename_len,
&walltime, &cputime,
NULL, 0, NULL);
}
else { /* (Much) more overhead */
int free_callpath = 1;
int callpath_len = 0;
equivalence_t *callpath = get_callpath(*thread_id, &callpath_len);
u.keyptr = getkey(*thread_id, name, name_len,
filename, filename_len,
&walltime, &cputime,
callpath, callpath_len, &free_callpath);
if (free_callpath) free_drhook(callpath);
}
if (cstklen == 0) {
/* Double precision */
*key = u.d;
}
else {
/* Single precision : The variable "*key" is treated like max 4-byte entity -- "an index" */
(void) callstack(*thread_id, key, u.keyptr);
}
ITSELF_1;
if (opt_calltrace) {
coml_set_lockid_(&DRHOOK_lock);
{
const int ftnunitno = 0; /* stderr */
const int print_option = 2; /* calling tree */
int level = 0;
c_drhook_print_(&ftnunitno, thread_id, &print_option, &level);
/* fprintf(stderr,"%d#%d> %*.*s [%llu]\n",myproc,*thread_id,name_len,name_len,name,u.ull); */
}
coml_unset_lockid_(&DRHOOK_lock);
}
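  /* Timeline sampling: a record is written on every opt_timeline_freq-th entry, or earlier
     when rss/heap/stack/vmpeak has moved by more than opt_timeline_MB megabytes since the
     previous record. */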
if (timeline) {
int tid = *thread_id;
if (opt_timeline_thread <= 0 || tid <= opt_timeline_thread) {
drhook_timeline_t *tl = &timeline[tid-1];
int bigjump = 1;
unsigned long long int mod = (tl->calls[0]++)%opt_timeline_freq;
double rss = (double)(getrss_()/1048576.0); /* in MBytes */
double curheap = (opt_timeline_thread == 1 && tid == 1) ?
(double)(getcurheap_()/1048576.0) : (double)(getcurheap_thread_(&tid)/1048576.0); /* in MBytes */
double stack = (double)(getstk_()/1048576.0); /* in MBytes */
double vmpeak = (double)(getvmpeak_()/1048576.0); /* in MBytes */
if (mod != 0) {
double inc_MB;
inc_MB = tl->last_rss_MB - rss;
if (ABS(inc_MB) < opt_timeline_MB) inc_MB = tl->last_curheap_MB - curheap;
if (ABS(inc_MB) < opt_timeline_MB) inc_MB = tl->last_stack_MB - stack;
if (ABS(inc_MB) < opt_timeline_MB) inc_MB = tl->last_vmpeak_MB - vmpeak;
if (ABS(inc_MB) < opt_timeline_MB) bigjump = 0;
}
if (mod == 0 || bigjump) {
coml_set_lockid_(&DRHOOK_lock);
{
int ftnunitno = opt_timeline_unitno;
const int print_option = 5; /* calling "tree" with just the current entry */
int level = 0;
tl->last_rss_MB = rss;
tl->last_curheap_MB = curheap;
tl->last_stack_MB = stack;
tl->last_vmpeak_MB = vmpeak;
c_drhook_print_(&ftnunitno, &tid, &print_option, &level);
}
coml_unset_lockid_(&DRHOOK_lock);
}
} /* if (opt_timeline_thread <= 0 || tid <= opt_timeline_thread) */
}
if (opt_random_memstat > 0) random_memstat(*thread_id,0);
}
/*=== c_drhook_end_ ===*/
void
c_drhook_end_(const char *name,
const int *thread_id,
const double *key,
const char *filename,
const int *sizeinfo
/* Hidden length */
,int name_len, int filename_len)
{
TIMERS;
equivalence_t u;
ITSELF_0;
if (cstklen == 0) {
/* Double precision */
u.d = *key;
}
else {
/* Single precision : The variable "*key" is treated like max 4-byte entity -- "an index" */
u.keyptr = callstack(*thread_id, (void *)key, NULL);
}
/*
if (opt_calltrace) {
coml_set_lockid_(&DRHOOK_lock);
fprintf(stderr,"%d#%d< %*.*s [%llu]\n",myproc,*thread_id,name_len,name_len,name,u.ull);
coml_unset_lockid_(&DRHOOK_lock);
}
*/
if (name_len > 0 && opt_funcexit == *thread_id) {
fprintf(stdout,"<x> %d %d %.*s %lld %lld\n",myproc,*thread_id,name_len,name,hwm,stk);
fflush(stdout);
}
if (timeline) {
int tid = *thread_id;
if (opt_timeline_thread <= 0 || tid <= opt_timeline_thread) {
drhook_timeline_t *tl = &timeline[tid-1];
int bigjump = 1;
unsigned long long int mod = (tl->calls[1]++)%opt_timeline_freq;
double rss = (double)(getrss_()/1048576.0); /* in MBytes */
double curheap = (opt_timeline_thread == 1 && tid == 1) ?
(double)(getcurheap_()/1048576.0) : (double)(getcurheap_thread_(&tid)/1048576.0); /* in MBytes */
double stack = (double)(getstk_()/1048576.0); /* in MBytes */
double vmpeak = (double)(getvmpeak_()/1048576.0); /* in MBytes */
if (mod != 0) {
double inc_MB;
inc_MB = tl->last_rss_MB - rss;
if (ABS(inc_MB) < opt_timeline_MB) inc_MB = tl->last_curheap_MB - curheap;
if (ABS(inc_MB) < opt_timeline_MB) inc_MB = tl->last_stack_MB - stack;
if (ABS(inc_MB) < opt_timeline_MB) inc_MB = tl->last_vmpeak_MB - vmpeak;
if (ABS(inc_MB) < opt_timeline_MB) bigjump = 0;
}
if (mod == 0 || bigjump) {
coml_set_lockid_(&DRHOOK_lock);
{
int ftnunitno = opt_timeline_unitno;
const int print_option = -5; /* calling "tree" with just the current entry */
int level = 0;
tl->last_rss_MB = rss;
tl->last_curheap_MB = curheap;
tl->last_stack_MB = stack;
tl->last_vmpeak_MB = vmpeak;
c_drhook_print_(&ftnunitno, &tid, &print_option, &level);
}
coml_unset_lockid_(&DRHOOK_lock);
}
} /* if (opt_timeline_thread <= 0 || tid <= opt_timeline_thread) */
}
if (watch && watch_count > 0) check_watch("when leaving routine", name, name_len, 1);
putkey(*thread_id, u.keyptr, name, name_len,
*sizeinfo,
&walltime, &cputime);
ITSELF_1;
}
/*=== c_drhook_memcounter_ ===*/
void
c_drhook_memcounter_(const int *thread_id,
const long long int *size,
long long int *keyptr_addr)
{
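  /* Invoked around (DE)ALLOCATE events (*size > 0 allocates, *size < 0 frees): updates the
     current routine's memory-profiling counters and, when timelining is active, emits a
     record for changes larger than opt_timeline_MB. */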
int tid = (thread_id && (*thread_id >= 1) && (*thread_id <= numthreads))
? *thread_id : get_thread_id_();
int has_timeline = (timeline && size) ? opt_timeline : 0;
if (has_timeline) {
if (opt_timeline_thread <= 1 || tid <= opt_timeline_thread) {
double size_MB = (double)((*size)/1048576.0); /* In MBytes */
if (ABS(size_MB) < opt_timeline_MB) has_timeline = 0; /* Do not report */
}
else {
has_timeline = 0; /* Do not report */
}
} /* if (has_timeline) */
if (opt_memprof) {
if (size) {
union {
long long int keyptr_addr;
drhook_key_t *keyptr;
} u;
long long int alldelta;
if (*size > 0) { /* Memory is being allocated */
if (curkeyptr[tid-1]) {
drhook_key_t *keyptr = curkeyptr[tid-1];
keyptr->mem_curdelta += *size;
alldelta = keyptr->mem_curdelta + keyptr->mem_child;
if (alldelta > keyptr->maxmem_alldelta) keyptr->maxmem_alldelta = alldelta;
if (keyptr->mem_curdelta > keyptr->maxmem_selfdelta)
keyptr->maxmem_selfdelta = keyptr->mem_curdelta;
if (keyptr_addr) {
u.keyptr = keyptr;
*keyptr_addr = u.keyptr_addr;
}
keyptr->alloc_count++;
}
else {
if (keyptr_addr) *keyptr_addr = 0;
} /* if (curkeyptr[tid-1]) */
/*
fprintf(stderr,
"memcounter: allocated %lld bytes ; *keyptr_addr = %lld\n",
*size, *keyptr_addr);
*/
}
else { /* Memory is being freed */
drhook_key_t *keyptr;
if (keyptr_addr && (*keyptr_addr)) {
u.keyptr_addr = *keyptr_addr;
keyptr = u.keyptr;
}
else
keyptr = curkeyptr[tid-1];
/*
fprintf(stderr,
"memcounter: DE-allocated %lld bytes ; *keyptr_addr = %lld\n",
*size, *keyptr_addr);
*/
if (keyptr) {
long long int prev_curdelta = keyptr->mem_curdelta;
keyptr->mem_curdelta += *size;
alldelta = prev_curdelta + keyptr->mem_child;
if (alldelta > keyptr->maxmem_alldelta) keyptr->maxmem_alldelta = alldelta;
if (*size < 0) keyptr->free_count++;
} /* if (keyptr) */
} /* if (*size > 0) ... else */
} /* if (size) */
} /* if (opt_memprof) */
if (has_timeline) {
double curheap = (opt_timeline_thread == 1 && tid == 1) ?
(double)(getcurheap_()/1048576.0) : (double)(getcurheap_thread_(&tid)/1048576.0); /* in MBytes */
double rss = (double)(getrss_()/1048576.0); /* in MBytes */
double stack = (double)(getstk_()/1048576.0); /* in MBytes */
double vmpeak = (double)(getvmpeak_()/1048576.0); /* in MBytes */
coml_set_lockid_(&DRHOOK_lock);
{
int ftnunitno = opt_timeline_unitno;
double size_MB = (double)((*size)/1048576.0); /* In MBytes */
int print_option = (size_MB > 0) ? 6 : -6; /* timeline upon c_drhook_memcounter_ & (big) ALLOCATE or DEALLOCATE */
int level = 0;
drhook_timeline_t *tl = &timeline[tid-1];
tl->last_curheap_MB = curheap;
tl->last_rss_MB = rss;
tl->last_stack_MB = stack;
tl->last_vmpeak_MB = vmpeak;
c_drhook_print_(&ftnunitno, &tid, &print_option, &level);
}
coml_unset_lockid_(&DRHOOK_lock);
} /* if (has_timeline) */
}
/*=== c_drhook_print_ ===*/
#define PRINT_HWM() \
if (opt_gethwm) { sprintf(s,",hwm=%lldK",keyptr->hwm/1024); s += strlen(s); }
#define PRINT_RSS() \
if (opt_getrss) { \
sprintf(s,",rss/max=%lldK/%lldK",keyptr->rssnow/1024, keyptr->maxrss/1024); \
s += strlen(s); \
}
#define PRINT_STK() \
if (opt_getstk) { \
sprintf(s,",stack/max=%lldK/%lldK",keyptr->stack/1024, keyptr->maxstack/1024); \
s += strlen(s); \
}
#define PRINT_PAG() \
if (opt_getpag) { \
sprintf(s,",pag=%lld",keyptr->paging); \
s += strlen(s); \
}
#define PRINT_WALL() \
if (opt_walltime) { \
double self = keyptr->delta_wall_all-keyptr->delta_wall_child; \
if (self < 0) self = 0; \
sprintf(s,",wall=%.3fs/%.3fs", \
keyptr->delta_wall_all, self); \
s += strlen(s); \
}
#define PRINT_CPU() \
if (opt_cputime) { \
double self = keyptr->delta_cpu_all-keyptr->delta_cpu_child; \
if (self < 0) self = 0; \
sprintf(s,",cpu=%.3fs/%.3fs", \
keyptr->delta_cpu_all, self); \
s += strlen(s); \
}
#define PRINT_CALLS() \
if (opt_calls) { \
sprintf(s,",#%llu,st=%d",keyptr->calls,keyptr->status); \
s += strlen(s); \
}
static int
prof_name_comp(const void *v1, const void *v2)
{
const drhook_prof_t *p1 = v1;
const drhook_prof_t *p2 = v2;
return strcmp(p1->name,p2->name);
}
static int
memprof_name_comp(const void *v1, const void *v2)
{
const drhook_memprof_t *p1 = v1;
const drhook_memprof_t *p2 = v2;
return strcmp(p1->name,p2->name);
}
static int
prof_pc_comp_desc(const void *v1, const void *v2)
{
const drhook_prof_t *p1 = v1;
const drhook_prof_t *p2 = v2;
if (p1->pc < p2->pc) return 1;
else if (p1->pc > p2->pc) return -1;
else return 0;
}
static int
memprof_pc_comp_desc(const void *v1, const void *v2)
{
const drhook_memprof_t *p1 = v1;
const drhook_memprof_t *p2 = v2;
if (p1->pc < p2->pc) return 1;
else if (p1->pc > p2->pc) return -1;
else return 0;
}
static const char *
trim_and_adjust_left(const char *p, int *name_len)
{
int len = strlen(p);
if (len > 0) {
const char *back = &p[len-1];
while (len > 0 && *back-- == ' ') len--;
while (len > 0 && *p == ' ') { p++; len--; }
}
if (name_len) *name_len = len;
return p;
}
static void print_routine_name0(FILE * fp, const char * p_name, int p_tid, const char * p_filename, int p_cluster,
const equivalence_t * p_callpath, int p_callpath_len, int len, int cluster_size)
{
int name_len = 0;
const char *name = trim_and_adjust_left(p_name,&name_len);
if (callpath_packed) {
if (p_callpath && p_callpath_len > 0) {
const equivalence_t * callpath = &p_callpath[p_callpath_len-1];
int j;
for (j=0; j<p_callpath_len; callpath--, j++)
if (callpath && callpath->keyptr && callpath->keyptr->name) {
const char *name = callpath->keyptr->name;
int name_len = callpath->keyptr->name_len;
fprintf(fp,"%.*s/",name_len,name);
}
}
}
fprintf(fp,"%.*s@%d%s%s",
name_len, name,
p_tid,
p_filename ? ":" : "",
p_filename ? p_filename : "");
if (opt_clusterinfo) {
fprintf(fp," [%d,%d]",
p_cluster, ABS(cluster_size));
}
if (!callpath_packed)
unroll_callpath(fp, len, p_callpath, p_callpath_len);
}
#define print_routine_name(fp, p, len, cluster_size) \
if (fp && p) { \
print_routine_name0(fp, p->name, p->tid, p->filename, p->cluster, \
p->callpath, p->callpath_len, len, cluster_size);\
} /* if (fp && p) */
static void
DrHookPrint(int ftnunitno, const char *line)
{
if (line) {
FILE *fp = NULL;
if (ftnunitno <= 0)
fp = stderr;
else if (ftnunitno == 6)
fp = stdout;
else
dr_hook_prt_(&ftnunitno, line, strlen(line));
OPTPRINT(fp,"%s\n",line);
}
}
void
c_drhook_print_(const int *ftnunitno,
const int *thread_id,
const int *print_option, /*
1=raw call counts
2=calling tree
3=profiling info
4=memory profiling
5=timeline upon entering the routine
-5=timeline upon leaving the routine
6=timeline upon c_drhook_memcounter_ & (big) ALLOCATE
-6=timeline upon c_drhook_memcounter_ & (big) DEALLOCATE
7=timeline : the very first call (upon setup or dr.hook)
-7=timeline : the very last call (in atexit())
*/
int *level
)
{
static int first_time = 0;
int tid = (thread_id && (*thread_id >= 1) && (*thread_id <= numthreads))
? *thread_id : get_thread_id_();
int mytid = get_thread_id_();
char *pfx = PREFIX(tid);
if (ftnunitno && keydata && calltree) {
char line[4096];
int abs_print_option = ABS(*print_option);
int j;
/* Mod to call traceback and continue if called with level=99 */
if(*level == 99) {
*level=0;
}
else {
if(*print_option == 2) {
if(first_time == 1) return;
first_time = 1;
}
}
/* end of Mod */
if (*print_option == 1) { /* raw call counts */
for (j=0; j<hashsize; j++) {
int nestlevel = 0;
drhook_key_t *keyptr = &keydata[tid-1][j];
while (keyptr) {
if (keyptr->name) {
char *s = line;
sprintf(s,
"%s %s [%s@%s:%d] [hash#%d,nest=%d] '%s'",
pfx,TIMESTR(tid),FFL,
j,nestlevel,keyptr->name);
s += strlen(s);
PRINT_CALLS();
PRINT_HWM();
PRINT_RSS();
PRINT_STK();
PRINT_PAG();
PRINT_WALL();
PRINT_CPU();
*s = 0;
DrHookPrint(*ftnunitno, line);
}
keyptr = keyptr->next;
nestlevel++;
} /* while (keyptr) */
} /* for (j=0; j<hashsize; j++) */
}
else if (*print_option == 2 ||
abs_print_option == 5 ||
abs_print_option == 6 ||
abs_print_option == 7
) { /* the current calling tree */
drhook_calltree_t *treeptr = calltree[tid-1];
if (*print_option == 2) {
long long int hwm = getmaxhwm_()/1048576;
long long int rss = getmaxrss_()/1048576;
long long int maxstack = getmaxstk_()/1048576;
long long int vmpeak = getvmpeak_()/1048576;
snprintf(line,sizeof(line),
"%s %s [%s@%s:%d] %lld MB (maxheap), %lld MB (maxrss), %lld MB (maxstack), %lld MB (vmpeak)",
pfx,TIMESTR(tid),FFL,
hwm,rss,maxstack,vmpeak);
DrHookPrint(*ftnunitno, line);
}
if (tid > 1) {
if (*print_option == 2) {
/* I'm not a master thread, but my master has the beginning of the calltree */
int initlev = 0;
const int master = 1;
first_time = 0;
c_drhook_print_(ftnunitno, &master, print_option, &initlev);
*level += initlev;
}
else if (tid > opt_timeline_thread) {
return;
}
}
if (abs_print_option == 7) {
treeptr = NULL;
}
else if (abs_print_option == 5 || abs_print_option == 6) {
treeptr = thiscall[tid-1];
}
else {
treeptr = calltree[tid-1];
}
while (abs_print_option == 7 || (treeptr && treeptr->active)) {
int do_print = (*print_option == 2 ||
abs_print_option == 7 ||
abs_print_option == 5 || abs_print_option == 6);
if (do_print) {
drhook_key_t *keyptr = (abs_print_option == 7) ? NULL : treeptr->keyptr;
char *s = line;
char is_timeline = 1, kind;
switch (*print_option) {
case -5: kind = '<'; break;
case -6: kind = '-'; break;
case -7: kind = 'E'; break;
case 5: kind = '>'; break;
case 6: kind = '+'; break;
case 7: kind = 'B'; break;
default:
case 2: kind = ':'; is_timeline = 0; break;
}
if (*print_option == 2 ||
(is_timeline && tid > 1 && tid <= opt_timeline_thread)) {
sprintf(s,"%s %s [%s@%s:%d] %s%c ",
pfx,TIMESTR(tid),FFL,
is_timeline ? "tl:" : "",
kind);
}
else if (is_timeline && opt_timeline_thread == 1 && tid == 1) {
sprintf(s,"%s %s [%s@%s:%d] %s%c ",
pfx,TIMESTR(tid),FFL,
is_timeline ? "tl:" : "",
kind);
}
s += strlen(s);
(*level)++;
for (j=0; j<(*level); j++) *s++ = ' ';
if (*print_option == 2) {
if(mytid != tid) { /* We are printing the master call tree as far as >OMP */
if(strncmp(">OMP",keyptr->name,4) == 0) {
(*level)--;
return;
}
}
sprintf(s,"%s ",keyptr->name);
s += strlen(s);
}
if (is_timeline) {
double wall = WALLTIME();
double rss, curheap, stack, vmpeak;
drhook_timeline_t *tl = &timeline[tid-1];
if (abs_print_option == 5 || abs_print_option == 6) { /* when called via drhook_begin/_end or memcounter */
curheap = tl->last_curheap_MB;
rss = tl->last_rss_MB;
stack = tl->last_stack_MB;
vmpeak = tl->last_vmpeak_MB;
}
else {
rss = (double)(getrss_()/1048576.0); /* in MBytes */
curheap = (opt_timeline_thread == 1 && tid == 1) ?
(double)(getcurheap_()/1048576.0) : (double)(getcurheap_thread_(&tid)/1048576.0); /* in MBytes */
stack = (double)(getstk_()/1048576.0); /* in MBytes */
vmpeak = (double)(getvmpeak_()/1048576.0); /* in MBytes */
tl->last_curheap_MB = curheap;
tl->last_rss_MB = rss;
tl->last_stack_MB = stack;
tl->last_vmpeak_MB = vmpeak;
}
if (opt_timeline_format == 1) {
sprintf(s, "%.6f %.4g %.4g %.4g %.4g", wall, rss, curheap, stack, vmpeak);
}
else {
sprintf(s,
"wall=%.6f cpu=%.4g hwm=%.4g rss=%.4g curheap=%.4g stack=%.4g vmpeak=%.4g pag=%lld",
wall, CPUTIME(),
(double)(gethwm_()/1048576.0), rss,
curheap,
(double)(getstk_()/1048576.0),
(double)(getvmpeak_()/1048576.0),
getpag_());
}
s += strlen(s);
*s++ = ' ';
if (keyptr) {
sprintf(s,"'%s'",keyptr->name);
}
else {
sprintf(s,"'#PROGRAM %s'",(*print_option == 7) ? "BEGIN" : "END");
}
s += strlen(s);
{
int current_numth = 0;
coml_get_num_threads_(¤t_numth);
sprintf(s,"[#%d]",current_numth);
s += strlen(s);
}
}
else {
PRINT_CALLS();
PRINT_HWM();
PRINT_RSS();
PRINT_STK();
PRINT_PAG();
PRINT_WALL();
PRINT_CPU();
}
*s = 0;
DrHookPrint(*ftnunitno, line);
}
if (abs_print_option == 7 || abs_print_option == 5 || abs_print_option == 6) break;
if (treeptr) treeptr = treeptr->next;
} /* while (abs_print_option == 7 || (treeptr && treeptr->active)) */
}
else if (*print_option == 3) { /* profiling (CPU, wall-clock and/or MFlop/s) */
int len;
int t;
double cumul;
double tottime = 0, max_overhead_pc = 0;
double *tot = NULL;
int nprof = 0;
drhook_prof_t *prof = NULL;
drhook_prof_t *p;
double flop_tot = 0, instr_tot = 0;
double *flop = NULL, *instr = NULL;
if (!opt_wallprof && !opt_cpuprof) return; /* no profiling info available */
if (tid > 1) return; /* just master thread allowed ; takes care of siblings, too */
if (numthreads<=0) return;
if (do_prof_off) return;
do_prof_off = 1;
/* Insert "$drhook" */
if (keyself && opt_self > 1) {
for (t=0; t<numthreads; t++) (void) insertkey(t+1,keyself[t]);
}
flop = calloc_drhook(numthreads, sizeof(*flop));
instr = calloc_drhook(numthreads, sizeof(*instr));
tot = calloc_drhook(numthreads, sizeof(*tot));
for (t=0; t<numthreads; t++) {
for (j=0; j<hashsize; j++) {
drhook_key_t *keyptr = &keydata[t][j];
while (keyptr) {
if (keyptr->name && (keyptr->status == 0 || signal_handler_called)) {
double self;
if (opt_wallprof) {
self = keyptr->delta_wall_all - keyptr->delta_wall_child;
}
else {
self = keyptr->delta_cpu_all - keyptr->delta_cpu_child;
}
/* if (self < 0) self = 0; */
tot[t] += self;
#ifdef HPM
flop[t] += keyptr->avg_mflops * self; /* mflop_count(keyptr); */
instr[t] += keyptr->avg_mipsrate * self; /* mip_count(keyptr); */
#endif
nprof++;
}
keyptr = keyptr->next;
} /* while (keyptr && keyptr->status == 0) */
} /* for (j=0; j<hashsize; j++) */
} /* for (t=0; t<numthreads; t++) */
if (opt_wallprof) { /* a bit unreliable; had not taken max. value of threads wall yet; will be recalculated */
tottime = tot[0] + ((keyself && opt_self > 1) ? keyself[0]->delta_wall_all : 0);
for (t=1; t<numthreads; t++) {
double tmp = tot[t] + ((keyself && opt_self > 1) ? keyself[t]->delta_wall_all : 0);
tottime = MAX(tottime,tmp);
}
}
else { /* ok & reliable (for cpuprof) */
tottime = 0;
for (t=0; t<numthreads; t++) tottime += (tot[t] + ((keyself && opt_self > 1) ? keyself[t]->delta_cpu_all : 0));
}
if (tottime <= 0) tottime = 1e-10;
p = prof = calloc_drhook(nprof + 1, sizeof(*prof)); /* Make sure there is at least one entry */
for (t=0; t<numthreads; t++) {
for (j=0; j<hashsize; j++) {
drhook_key_t *keyptr = &keydata[t][j];
while (keyptr) {
if (keyptr->name && (keyptr->status == 0 || signal_handler_called)) {
p->self = opt_wallprof ?
keyptr->delta_wall_all - keyptr->delta_wall_child :
keyptr->delta_cpu_all - keyptr->delta_cpu_child;
p->total = opt_wallprof ?
keyptr->delta_wall_all :
keyptr->delta_cpu_all;
p->calls = keyptr->calls;
p->name = keyptr->name;
p->pc = (p->self/tottime) * 100.0;
if (p->calls > 0) {
p->percall_ms_self = (p->self/p->calls) * 1000.0;
p->percall_ms_total = (p->total/p->calls) * 1000.0;
}
p->tid = t+1;
p->index = p - prof;
#ifdef HPM
if (opt_hpmprof) {
p->mflops = keyptr->avg_mflops; /* mflops_hpm(keyptr); */
p->mipsrate = keyptr->avg_mipsrate; /* mips_hpm(keyptr); */
p->divpc = divpc_hpm(keyptr);
}
#endif
p->filename = keyptr->filename;
p->sizeinfo = keyptr->sizeinfo;
p->min_sizeinfo = keyptr->min_sizeinfo;
p->max_sizeinfo = keyptr->max_sizeinfo;
p->sizespeed = (p->self > 0 && p->sizeinfo > 0) ? p->sizeinfo/p->self : 0;
p->sizeavg = (p->calls > 0 && p->sizeinfo > 0) ? p->sizeinfo/p->calls : 0;
p->callpath = keyptr->callpath;
p->callpath_len = keyptr->callpath_len;
p++;
}
keyptr = keyptr->next;
} /* while (keyptr && keyptr->status == 0) */
} /* for (j=0; j<hashsize; j++) */
} /* for (t=0; t<numthreads; t++) */
do {
double mflop_rate = 0;
double mip_rate = 0;
int numroutines = 0;
int cluster;
double *maxval = calloc_drhook(nprof+1, sizeof(*maxval)); /* make sure at least 1 element */
int *clusize = calloc_drhook(nprof+1, sizeof(*clusize)); /* make sure at least 1 element */
char *prevname = NULL;
const char *fmt1 = "%5d %8.2f %12.3f %12.3f %12.3f %14llu %11.2f %11.2f %s";
const char *fmt2 = "%5d %8.2f %12.3f %12.3f %12.3f %14llu %7.0f %7.0f %7.1f %s";
const char *fmt = opt_hpmprof ? fmt2 : fmt1;
char *filename = get_mon_out(myproc);
FILE *fp = NULL;
if (!filename) break;
if ((myproc == 1 && mon_out_procs == -1) || mon_out_procs == myproc) {
fprintf(stderr,
"%s %s [%s@%s:%d] Writing profiling information of proc#%d into file '%s'\n",
pfx,TIMESTR(tid),FFL,
myproc,filename);
}
fp = fopen(filename,"w");
if (!fp) goto finish_3;
/* alphanumerical sorting to find out clusters of the same routine but on different threads */
/* also find out total wall clock time */
/* calculate percentage values */
p = prof;
qsort(p, nprof, sizeof(*p), prof_name_comp);
cluster = 0;
maxval[cluster] = p->self;
p->maxval = &maxval[cluster];
clusize[cluster] = 1;
prevname = p->name;
p++;
for (j=1; j<nprof; j++) {
if (!strequ(prevname,p->name)) {
(p-1)->cluster = cluster;
(p-1)->maxval = &maxval[cluster];
prevname = p->name;
cluster++;
}
if (p->self > maxval[cluster]) maxval[cluster] = p->self;
p->cluster = cluster;
p->maxval = &maxval[cluster];
clusize[cluster]++;
p++;
} /* for (j=1; j<nprof; j++) */
numroutines = (nprof > 0) ? (cluster + 1) : 0; /* Active no. of routines */
if (opt_wallprof) tottime = 0;
p = prof;
for (j=0; j<nprof; j++) {
int use_this = 0;
cluster = p->cluster;
if (clusize[cluster] > 1) { /* multiple threads <= numthreads indeed called this routine */
p->is_max = (p->self == *p->maxval);
if (p->is_max) { /* first max found will be used for total time */
clusize[cluster] = -clusize[cluster]; /* ensures that max has been found for this cluster */
use_this = opt_wallprof;
}
}
else if (clusize[cluster] == 1) {
use_this = opt_wallprof;
}
if (use_this && opt_wallprof) tottime += p->self;
p++;
}
if (tottime <= 0) tottime = 1e-10;
if (opt_wallprof) { /* use re-calculated tottime to define percentages */
p = prof;
for (j=0; j<nprof; j++) {
p->pc = (p->self/tottime) * 100.0;
p++;
}
}
/* sorting with respect to percentage value */
p = prof;
qsort(p, nprof, sizeof(*p), prof_pc_comp_desc);
flop_tot = 0;
instr_tot = 0;
max_overhead_pc = 0;
for (t=0; t<numthreads; t++) {
flop_tot += flop[t];
instr_tot += instr[t];
if (overhead) {
max_overhead_pc = MAX(max_overhead_pc,overhead[t]);
#ifdef DEBUG
fprintf(fp,"tid#%d: overhead = %.15g s\n",t+1,overhead[t]);
#endif
}
}
#ifdef DEBUG
fprintf(fp,"max overhead = %.15g s, tottime = %.15g s\n",
max_overhead_pc, tottime);
#endif
if (tottime - max_overhead_pc > 0) {
max_overhead_pc = 100.0*(max_overhead_pc/(tottime - max_overhead_pc));
}
else {
max_overhead_pc = 100;
}
fprintf(fp,
"Profiling information for program='%s', proc#%d:\n",a_out, myproc);
fprintf(fp,"\tNo. of instrumented routines called : %d\n", numroutines);
fprintf(fp,"\tInstrumentation started : %s\n",start_stamp ? start_stamp : "N/A");
end_stamp = timestamp();
fprintf(fp,"\tInstrumentation ended : %s\n",end_stamp ? end_stamp : "N/A");
fprintf(fp,"\tInstrumentation overhead: %.2f%%\n",max_overhead_pc);
{
long long int hwm = getmaxhwm_()/1048576;
long long int rss = getmaxrss_()/1048576;
long long int maxstack = getmaxstk_()/1048576;
long long int vmpeak = getvmpeak_()/1048576;
long long int pag = getpag_();
fprintf(fp,
"\tMemory usage : %lld MB (heap), %lld MB (rss), %lld MB (stack), %lld MB (vmpeak), %lld (paging)\n",
hwm,rss,maxstack,vmpeak,pag);
}
if (opt_hpmprof) {
mflop_rate = flop_tot / tottime;
mip_rate = instr_tot / tottime;
fprintf(fp,
"\t%s-time is %.2f sec on proc#%d, %.0f MFlops (ops#%.0f*10^6), %.0f MIPS (ops#%.0f*10^6) (%d procs, %d threads)\n",
opt_wallprof ? "Wall" : "Total CPU", tottime, myproc,
mflop_rate, flop_tot, mip_rate, instr_tot,
nproc, numthreads);
}
else {
fprintf(fp,
"\t%s-time is %.2f sec on proc#%d (%d procs, %d threads)\n",
opt_wallprof ? "Wall" : "Total CPU", tottime, myproc,
nproc, numthreads);
}
if (myproc == 1) {
fprintf(stderr,
"Profiling information for program='%s', proc#%d:\n",a_out, myproc);
fprintf(stderr,"\tNo. of instrumented routines called : %d\n", numroutines);
fprintf(stderr,"\tInstrumentation started : %s\n",start_stamp ? start_stamp : "N/A");
fprintf(stderr,"\tInstrumentation ended : %s\n",end_stamp ? end_stamp : "N/A");
fprintf(stderr,"\tInstrumentation overhead: %.2f%%\n",max_overhead_pc);
if (opt_hpmprof) {
fprintf(stderr,
"\t%s-time is %.2f sec on proc#%d, %.0f MFlops (ops#%.0f*10^6), %.0f MIPS (ops#%.0f*10^6) (%d procs, %d threads)\n",
opt_wallprof ? "Wall" : "Total CPU", tottime, myproc,
mflop_rate, flop_tot, mip_rate, instr_tot,
nproc, numthreads);
}
else {
fprintf(stderr,
"\t%s-time is %.2f sec on proc#%d (%d procs, %d threads)\n",
opt_wallprof ? "Wall" : "Total CPU", tottime, myproc,
nproc, numthreads);
}
} /* if (myproc == 1) */
free_drhook(end_stamp);
for (t=0; t<numthreads; t++) {
double tmp = 100.0*(tot[t]/tottime);
if (opt_hpmprof && tot[t] > 0) {
mflop_rate = flop[t]/tot[t];
mip_rate = instr[t]/tot[t];
}
else {
mflop_rate = 0;
mip_rate = 0;
}
fprintf( fp,"\tThread#%d: %11.2f sec (%.2f%%)",t+1,tot[t],tmp);
if (opt_hpmprof) fprintf( fp,", %.0f MFlops (ops#%.0f*10^6), %.0f MIPS (ops#%.0f*10^6)", mflop_rate, flop[t], mip_rate, instr[t]);
fprintf( fp,"\n");
if (myproc == 1) {
fprintf(stderr,"\tThread#%d: %11.2f sec (%.2f%%)",t+1,tot[t],tmp);
if (opt_hpmprof) fprintf(stderr,", %.0f MFlops (ops#%.0f*10^6), %.0f MIPS (ops#%.0f*10^6)", mflop_rate, flop[t], mip_rate, instr[t]);
fprintf(stderr,"\n");
}
}
fprintf(fp,"\n");
if (opt_hpmprof) {
len =
fprintf(fp," # %% Time Cumul Self Total # of calls MIPS MFlops Div-%% ");
}
else {
len =
fprintf(fp," # %% Time Cumul Self Total # of calls Self Total ");
}
fprintf(fp,"Routine@<thread-id>");
if (opt_clusterinfo) fprintf(fp," [Cluster:(id,size)]");
fprintf(fp,"\n");
if (opt_sizeinfo) fprintf(fp,"%*s %s\n",len-20," ","(Size; Size/sec; Size/call; MinSize; MaxSize)");
if (opt_hpmprof) {
fprintf(fp, " (self) (sec) (sec) (sec) \n");
}
else {
fprintf(fp, " (self) (sec) (sec) (sec) ms/call ms/call\n");
}
fprintf(fp,"\n");
cumul = 0;
for (j=0; j<nprof; ) {
int cluster_size = clusize[p->cluster];
if (p->pc < percent_limit) break;
if (opt_cputime) {
cumul += p->self;
}
else {
if (p->is_max || cluster_size == 1) cumul += p->self;
}
if (opt_hpmprof) {
fprintf(fp, fmt,
++j, p->pc, cumul, p->self, p->total, p->calls,
p->mipsrate, p->mflops, p->divpc,
p->is_max ? "*" : " ");
}
else {
fprintf(fp, fmt,
++j, p->pc, cumul, p->self, p->total, p->calls,
p->percall_ms_self, p->percall_ms_total,
p->is_max ? "*" : " ");
}
print_routine_name(fp, p, len, cluster_size);
if (opt_sizeinfo && p->sizeinfo > 0) {
char s1[DRHOOK_STRBUF], s2[DRHOOK_STRBUF], s3[DRHOOK_STRBUF];
char s4[DRHOOK_STRBUF], s5[DRHOOK_STRBUF];
lld_commie(p->sizeinfo,s1);
dbl_commie(p->sizespeed,s2);
dbl_commie(p->sizeavg,s3);
lld_commie(p->min_sizeinfo,s4);
lld_commie(p->max_sizeinfo,s5);
fprintf(fp,"\n%*s (%s; %s; %s; %s; %s)",len-20," ",s1,s2,s3,s4,s5);
}
fprintf(fp,"\n");
p++;
} /* for (j=0; j<nprof; ) */
fclose(fp);
finish_3:
free_drhook(filename);
free_drhook(maxval);
free_drhook(clusize);
} while (0);
free_drhook(instr);
free_drhook(flop);
free_drhook(tot);
free_drhook(prof);
do_prof_off = 0;
}
else if (*print_option == 4) { /* Memory profiling */
int t, len;
int nprof = 0;
drhook_memprof_t *prof = NULL;
drhook_memprof_t *p;
long long int *tot;
long long int *maxseen_tot;
double totmaxmem_delta;
if (!opt_memprof) return; /* no profiling info available */
if (tid > 1) return; /* just master thread allowed ; takes care of siblings, too */
if (numthreads<=0) return;
if (do_prof_off) return;
do_prof_off = 1;
tot = calloc_drhook(numthreads, sizeof(*tot));
maxseen_tot = calloc_drhook(numthreads, sizeof(*maxseen_tot));
for (t=0; t<numthreads; t++) {
for (j=0; j<hashsize; j++) {
drhook_key_t *keyptr = &keydata[t][j];
while (keyptr) {
if (keyptr->name && (keyptr->status == 0 || signal_handler_called)) {
long long int self;
self = keyptr->maxmem_selfdelta;
if (self < 0) self = 0;
tot[t] += self;
maxseen_tot[t] = MAX(maxseen_tot[t], keyptr->mem_seenmax);
nprof++;
}
keyptr = keyptr->next;
} /* while (keyptr && keyptr->status == 0) */
} /* for (j=0; j<hashsize; j++) */
} /* for (t=0; t<numthreads; t++) */
totmaxmem_delta = tot[0];
for (t=1; t<numthreads; t++) {
long long int tmp = tot[t];
totmaxmem_delta = MAX(totmaxmem_delta,tmp);
}
if (totmaxmem_delta <= 0) totmaxmem_delta = 1e-10; /* To avoid divide-by-zero */
p = prof = calloc_drhook(nprof + 1, sizeof(*prof)); /* Make sure there is at least one entry */
for (t=0; t<numthreads; t++) {
for (j=0; j<hashsize; j++) {
drhook_key_t *keyptr = &keydata[t][j];
while (keyptr) {
if (keyptr->name && (keyptr->status == 0 || signal_handler_called)) {
p->self = keyptr->maxmem_selfdelta;
p->children = keyptr->mem_child;
p->hwm = keyptr->mem_maxhwm;
p->rss = keyptr->mem_maxrss;
p->stk = keyptr->mem_maxstk;
p->pag = keyptr->mem_maxpagdelta;
p->leaked = keyptr->mem_curdelta;
p->calls = keyptr->calls;
p->alloc_count += keyptr->alloc_count;
p->free_count += keyptr->free_count;
p->name = keyptr->name;
p->pc = (p->self/totmaxmem_delta) * 100.0;
p->tid = t+1;
p->index = p - prof;
p->filename = keyptr->filename;
p->callpath = keyptr->callpath;
p->callpath_len = keyptr->callpath_len;
p++;
}
keyptr = keyptr->next;
} /* while (keyptr && keyptr->status == 0) */
} /* for (j=0; j<hashsize; j++) */
} /* for (t=0; t<numthreads; t++) */
do {
int numroutines = 0;
int cluster;
long long int *maxval = calloc_drhook(nprof+1, sizeof(*maxval)); /* make sure at least 1 element */
int *clusize = calloc_drhook(nprof+1, sizeof(*clusize)); /* make sure at least 1 element */
char *prevname = NULL;
const char *fmt1 = "%5d %9.2f %14lld %14lld %14lld %14lld %14lld %10lld %10llu %10llu%s%10llu %s";
const char *fmt = fmt1;
char *filename = get_memmon_out(myproc);
FILE *fp = NULL;
if (!filename) break;
if ((myproc == 1 && mon_out_procs == -1) || mon_out_procs == myproc) {
fprintf(stderr,"Writing memory-profiling information of proc#%d into file '%s'\n",myproc,filename);
}
fp = fopen(filename,"w");
if (!fp) goto finish_4;
/* alphanumerical sorting to find out clusters of the same routine but on different threads */
p = prof;
qsort(p, nprof, sizeof(*p), memprof_name_comp);
cluster = 0;
maxval[cluster] = p->self;
p->maxval = &maxval[cluster];
clusize[cluster] = 1;
prevname = p->name;
p++;
for (j=1; j<nprof; j++) {
if (!strequ(prevname,p->name)) {
(p-1)->cluster = cluster;
(p-1)->maxval = &maxval[cluster];
prevname = p->name;
cluster++;
}
if (p->self > maxval[cluster]) maxval[cluster] = p->self;
p->cluster = cluster;
p->maxval = &maxval[cluster];
clusize[cluster]++;
p++;
} /* for (j=1; j<nprof; j++) */
numroutines = (nprof > 0) ? (cluster + 1) : 0; /* Active no. of routines */
totmaxmem_delta = 0;
p = prof;
for (j=0; j<nprof; j++) {
int use_this = 0;
cluster = p->cluster;
if (clusize[cluster] > 1) { /* multiple threads <= numthreads indeed called this routine */
p->is_max = (p->self == *p->maxval);
if (p->is_max) { /* first max found will be used for total time */
clusize[cluster] = -clusize[cluster]; /* ensures that max has been found for this cluster */
use_this = 1;
}
}
else if (clusize[cluster] == 1) {
use_this = 1;
}
if (use_this) totmaxmem_delta += p->self;
p++;
}
if (totmaxmem_delta <= 0) totmaxmem_delta = 1e-10; /* To avoid divide-by-zero */
/* use re-calculated totmaxmem_delta to define percentages */
p = prof;
for (j=0; j<nprof; j++) {
p->pc = (p->self/totmaxmem_delta) * 100.0;
p++;
}
/* sorting with respect to percentage value */
p = prof;
qsort(p, nprof, sizeof(*p), memprof_pc_comp_desc);
fprintf(fp,
"Memory-profiling information for program='%s', proc#%d:\n",a_out, myproc);
fprintf(fp,"\tNo. of instrumented routines called : %d\n", numroutines);
fprintf(fp,"\tInstrumentation started : %s\n",start_stamp ? start_stamp : "N/A");
end_stamp = timestamp();
fprintf(fp,"\tInstrumentation ended : %s\n",end_stamp ? end_stamp : "N/A");
{
long long int hwm = gethwm_()/1048576;
long long int rss = getrss_()/1048576;
long long int maxstack = getmaxstk_()/1048576;
long long int vmpeak = getvmpeak_()/1048576;
long long int pag = getpag_();
long long int maxseen = 0;
long long int leaked = 0;
p = prof;
for (j=0; j<nprof; j++) {
if (p->leaked > 0) leaked += p->leaked;
p++;
}
for (t=0; t<numthreads; t++) {
maxseen += maxseen_tot[t];
}
maxseen /= 1048576;
leaked /= 1048576;
fprintf(fp,
"\tMemory usage : %lld MB (max.seen), %lld MB (leaked), %lld MB (heap), %lld MB (max.rss), %lld MB (max.stack), %lld MB (vmpeak), %lld (paging)\n",
maxseen,leaked,hwm,rss,maxstack,vmpeak,pag);
fprintf(fp,"\tNo. of procs/threads: %d procs, %d threads\n",nproc,numthreads);
}
if (myproc == 1) {
fprintf(stderr,
"Memory-profiling information for program='%s', proc#%d:\n",a_out, myproc);
fprintf(stderr,"\tNo. of instrumented routines called : %d\n", numroutines);
fprintf(stderr,"\tInstrumentation started : %s\n",start_stamp ? start_stamp : "N/A");
fprintf(stderr,"\tInstrumentation ended : %s\n",end_stamp ? end_stamp : "N/A");
} /* if (myproc == 1) */
free_drhook(end_stamp);
fprintf(fp,"\n");
len =
fprintf(fp," # Memory-%% Self-alloc + Children Self-Leaked Heap Max.Stack Paging #Calls #Allocs #Frees ");
/*"12345-1234567899-12345678901234-12345678901234-12345678901234-12345678901234-12345678901234-12345678901234-12345678901234-123456789012-123456789012"*/
fprintf(fp,"Routine@<thread-id>");
if (opt_clusterinfo) fprintf(fp," [Cluster:(id,size)]");
fprintf(fp,"\n");
fprintf(fp, " (self) (bytes) (bytes) (bytes) (bytes) (bytes) (delta)");
/*"12345-1234567899-12345678901234-12345678901234-12345678901234-12345678901234-12345678901234-12345678901234-12345678901234-123456789012-123456789012"*/
fprintf(fp,"\n");
p = prof;
for (j=0; j<nprof; ) {
int cluster_size = clusize[p->cluster];
if (p->pc < percent_limit) break;
t = p->tid - 1;
if (p->children > maxseen_tot[t]) p->children = maxseen_tot[t]; /* adjust */
fprintf(fp, fmt,
++j, p->pc,
p->self, p->children, p->leaked,
p->hwm, p->stk, p->pag,
p->calls, p->alloc_count,
(p->alloc_count - p->free_count != 0) ? "*" : " ", p->free_count,
p->is_max ? "*" : " ");
print_routine_name(fp, p, len, cluster_size);
fprintf(fp,"\n");
p++;
} /* for (j=0; j<nprof; ) */
fclose(fp);
finish_4:
free_drhook(filename);
free_drhook(maxval);
free_drhook(clusize);
} while (0);
free_drhook(tot);
free_drhook(maxseen_tot);
free_drhook(prof);
do_prof_off = 0;
}
}
}
/*=== c_drhook_init_signals_ ===*/
void
c_drhook_init_signals_(const int *enforce)
{
signal_drhook_init(*enforce);
}
/*=== c_drhook_raise_ ===*/
/*
Just a convenience function for Fortran90 which may not have raise()-signal function
CALL c_drhook_raise(10) ! Raise signal#10
*/
void
c_drhook_raise_(const int *sig)
{
fflush(NULL);
raise(*sig);
}
/**** C-interface to Dr.Hook ****/
void
Dr_Hook(const char *name, int option, double *handle,
const char *filename, int sizeinfo,
int name_len, int filename_len)
{
static int first_time = 1;
static int value = 1; /* ON by default */
if (first_time) { /* Not thread safe */
extern void *cdrhookinit_(int *value); /* from ifsaux/support/cdrhookinit.F90 */
cdrhookinit_(&value);
first_time = 0;
}
if (value == 0) return; /* Immediate return if OFF */
if (value != 0) {
int tid = get_thread_id_();
if (option == 0) {
c_drhook_start_(name, &tid, handle,
filename, &sizeinfo,
name_len > 0 ? name_len : strlen(name),
filename_len > 0 ? filename_len : strlen(filename));
}
else if (option == 1) {
c_drhook_end_(name, &tid, handle,
filename, &sizeinfo,
name_len > 0 ? name_len : strlen(name),
filename_len > 0 ? filename_len : strlen(filename));
}
}
}
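/*
   Usage sketch (illustrative only, not from the original source): a C routine
   is typically bracketed with option 0 (start) and option 1 (end), reusing
   the same handle for both calls; passing 0 for the length arguments makes
   Dr_Hook fall back on strlen() as seen above.

     void my_routine(void)
     {
       double zhook_handle;
       Dr_Hook("MY_ROUTINE", 0, &zhook_handle, __FILE__, 0, 0, 0);
       ... work to be profiled ...
       Dr_Hook("MY_ROUTINE", 1, &zhook_handle, __FILE__, 0, 0, 0);
     }
*/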
/**** Interface to HPM ****/
/*<<< experimental >>>*/
#ifdef HPM
#ifdef RS6K
/**** Interface to HPM (RS6K) ****/
#include <pmapi.h>
static pthread_mutex_t hpm_lock = PTHREAD_MUTEX_INITIALIZER;
static int *hpm_tid_init = NULL;
static double cycles = 1300000000.0; /* 1.3GHz ; changed via pm_cycles() in init_hpm() */
#define MCYCLES (cycles * 1e-6)
#define TEST_PM_ERROR(name, rc) \
if (rc != 0) { \
fprintf(stderr,"PM_ERROR(tid#%d, pthread_self()=%d): rc=%d at %s(), line=%d, file=%s\n",\
tid,pthread_self(),rc,name,__LINE__,__FILE__); \
pm_error((char *)name, rc); \
spin(tid); \
RAISE(SIGABRT); \
}
static void
init_hpm(int tid)
{
const char *name = "init_hpm";
int rc;
if (!hpm_tid_init) {
hpm_tid_init = calloc_drhook(numthreads, sizeof(*hpm_tid_init));
cycles = pm_cycles();
}
if (!hpm_tid_init[tid-1]) {
#ifdef PMAPI_POST_P4
pm_info2_t pminfo;
#else
pm_info_t pminfo;
#endif
pm_groups_info_t pmgroupsinfo;
/*------------------------------------*/
/* initialize the performance monitor */
/*------------------------------------*/
#ifdef PMAPI_POST_P4
rc = pm_initialize(PM_VERIFIED | PM_UNVERIFIED | PM_CAVEAT | PM_GET_GROUPS,
&pminfo, &pmgroupsinfo, PM_CURRENT);
#else
rc = pm_init(PM_VERIFIED | PM_UNVERIFIED | PM_CAVEAT | PM_GET_GROUPS,
&pminfo, &pmgroupsinfo);
#endif
TEST_PM_ERROR((char *)name, rc);
if (myproc <= 1) fprintf(stderr,
">>>pm_init() for ECMWF/OpenMP-tid#%d, pthread_self()=%d\n",
tid,pthread_self());
}
if (!hpm_tid_init[tid-1]) {
#if defined(PMAPI_P7)
char *env = getenv("HPM_GROUP");
hpm_grp = env ? atoi(env) : 0; /* guard against HPM_GROUP being unset */
int group = 150; /* assumed default when HPM_GROUP selects no supported group */
fprintf(stderr,"hpm_group = %d\n",hpm_grp);
if (hpm_grp == 150) group = 150;
if (hpm_grp == 141) group = 141;
/*-- counters --
case 150:
strcpy(group_label, "pm_vsu23, VSU Execution");
strcpy(label[0], "four flops operation (fdiv,fsqrt) Scalar Instructions only (PM_VSU_FSQRT_FDIV)");
strcpy(label[1], "VSU0 Finished an instruction (PM_VSU_FIN)");
strcpy(label[2], "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only (PM_VSU_FMA)");
strcpy(label[3], "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished (PM_VSU_1FLOP)");
strcpy(label[4], "Run instructions completed(PM_RUN_INST_CMPL)");
strcpy(label[5], "Run cycles (PM_RUN_CYC)");
strcpy(label[6], "Nothing");
strcpy(label[7], "Nothing");
*/
/*-- counters --
case 141:
strcpy(group_label, "pm_vsu14, VSU Execution");
strcpy(label[0], "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished (PM_VSU_1FLOP)");
strcpy(label[1], "four flops operation (scalar fdiv, fsqrt; DP vector version of fmadd, fnmadd, fmsub, SP vector versions of single flop instructions) (PM_VSU_4FLOP)");
strcpy(label[2], "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub) (PM_VSU_8FLOP)");
strcpy(label[3], "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions) (PM_VSU_2FLOP)");
strcpy(label[4], "Run instructions completed(PM_RUN_INST_CMPL)");
strcpy(label[5], "Run cycles (PM_RUN_CYC)");
strcpy(label[6], "Nothing");
strcpy(label[7], "Nothing");
*/
#elif defined(PMAPI_P6)
const int group = 186; /* pm_hpm1 */
/*-- counters --
case 186:
strcpy(group_label, "HPM group");
strcpy(label[0], "FPU executed one flop instruction (PM_FPU_1FLOP)");
strcpy(label[1], "FPU executed multiply-add instruction (PM_FPU_FMA)");
strcpy(label[2], "FPU executed FSQRT or FDIV instruction (PM_FPU_SQRT_FDIV)");
strcpy(label[3], "Processor Cycles (PM_CYC [shared chip])");
strcpy(label[4], "Run instructions completed(PM_RUN_INST_CMPL)");
strcpy(label[5], "Run cycles (PM_RUN_CYC)");
strcpy(label[6], "Nothing");
strcpy(label[7], "Nothing");
*/
#elif defined(PMAPI_P5_PLUS)
/* IBM Power 5+ specific */
const int group = 150; /* pm_hpmcount2 */
/*-- counters -- (from John Hague, IBM/UK, 22-Aug-2006 : Thanx!!)
case 150:
strcpy(group_label, "pm_flop, Floating point operations");
strcpy(label[0], "FPU executed FDIV instruction (PM_FPU_FDIV)");
strcpy(label[1], "FPU executed multiply-add instruction (PM_FPU_FMA)");
strcpy(label[2], "FPU executed FSQRT instruction (PM_FPU_SQRT)");
strcpy(label[3], "FPU executed one flop instruction (PM_FPU_1FLOP)");
strcpy(label[4], "Run instructions completed(PM_RUN_INST_CMPL)");
strcpy(label[5], "Run cycles (PM_RUN_CYC)");
strcpy(label[6], "Nothing");
strcpy(label[7], "Nothing");
*/
#else
const int group = 60; /* pm_hpmcount2 */
/*-- counters --
case 60:
strcpy(group_label, "pm_hpmcount2, Hpmcount group for computation intensity analysis");
strcpy(label[0], "FPU executed FDIV instruction (PM_FPU_FDIV)");
strcpy(label[1], "FPU executed multiply-add instruction (PM_FPU_FMA)");
strcpy(label[2], "FPU0 produced a result (PM_FPU0_FIN)");
strcpy(label[3], "FPU1 produced a result (PM_FPU1_FIN)");
strcpy(label[4], "Processor cycles (PM_CYC)");
strcpy(label[5], "FPU executed store instruction (PM_FPU_STF)");
strcpy(label[6], "Instructions completed (PM_INST_CMPL)");
strcpy(label[7], "LSU executed Floating Point load instruction (PM_LSU_LDF)");
*/
#endif
if (myproc <= 1) fprintf(stderr,"group = %d\n",group);
pm_prog_t pmprog;
pm_data_t pmdata;
int i;
/*---------------------*/
/* set a default group */
/*---------------------*/
for (i=0; i<MAX_COUNTERS; i++) {
pmprog.events[i] = COUNT_NOTHING;
}
pmprog.events[0] = group;
/*-------------------------------------------------------------*/
/* set the mode for user (not kernel) and thread (not process) */
/*-------------------------------------------------------------*/
pmprog.mode.w = 0;
pmprog.mode.b.user = 1;
pmprog.mode.b.process = 0;
/* pmprog.mode.b.process = 1; */
/*------------------------------------------*/
/* for power-4 you have to use event groups */
/*------------------------------------------*/
pmprog.mode.b.is_group = 1;
/*---------------------------------------------------*/
/* set the mode to not to start counting immediately */
/*---------------------------------------------------*/
/* pmprog.mode.b.count = 1; */
pmprog.mode.b.count = 0;
/*-----------------------------------------*/
/* initialize the group and start counting */
/*-----------------------------------------*/
hpm_tid_init[tid-1] = pthread_self(); /* Always > 0 */
rc = pm_set_program_mythread(&pmprog);
TEST_PM_ERROR((char *)name, rc);
rc = pm_start_mythread();
TEST_PM_ERROR((char *)name, rc);
}
}
static void
stop_only_hpm(int tid, drhook_key_t *pstop)
{
const char *name = "stop_only_hpm";
pm_data_t pmdata;
int i, rc;
/* if (numthreads > 1) pthread_mutex_lock(&hpm_lock); */
if (!hpm_tid_init || !hpm_tid_init[tid-1]) init_hpm(tid);
/*
rc = pm_stop_mythread();
TEST_PM_ERROR((char *)name, rc);
*/
if (pstop && !pstop->counter_stopped) {
rc = pm_get_data_mythread(&pmdata);
TEST_PM_ERROR((char *)name, rc);
if (pstop && pstop->counter_in && !pstop->counter_stopped) {
for (i=0; i<MAX_COUNTERS; i++) {
pstop->counter_sum[i] += (pmdata.accu[i] - pstop->counter_in[i]);
}
pstop->counter_stopped = 1;
}
}
/*
rc = pm_start_mythread();
TEST_PM_ERROR((char *)name, rc);
*/
/* if (numthreads > 1) pthread_mutex_unlock(&hpm_lock); */
}
static void
stopstart_hpm(int tid, drhook_key_t *pstop, drhook_key_t *pstart)
{
const char *name = "stopstart_hpm";
pm_data_t pmdata;
int i, rc;
/* if (numthreads > 1) pthread_mutex_lock(&hpm_lock); */
if (!hpm_tid_init || !hpm_tid_init[tid-1]) init_hpm(tid);
/*
rc = pm_stop_mythread();
TEST_PM_ERROR((char *)name, rc);
*/
rc = pm_get_data_mythread(&pmdata);
TEST_PM_ERROR((char *)name, rc);
if (pstop && pstop->counter_in && !pstop->counter_stopped) {
for (i=0; i<MAX_COUNTERS; i++) {
pstop->counter_sum[i] += (pmdata.accu[i] - pstop->counter_in[i]);
}
pstop->counter_stopped = 1;
}
if (pstart) {
if (!pstart->counter_in ) pstart->counter_in = calloc_drhook(MAX_COUNTERS, sizeof(*pstart->counter_in ));
if (!pstart->counter_sum) pstart->counter_sum = calloc_drhook(MAX_COUNTERS, sizeof(*pstart->counter_sum));
for (i=0; i<MAX_COUNTERS; i++) {
pstart->counter_in[i] = pmdata.accu[i];
}
pstart->counter_stopped = 0;
}
/*
rc = pm_start_mythread();
TEST_PM_ERROR((char *)name, rc);
*/
/* if (numthreads > 1) pthread_mutex_unlock(&hpm_lock); */
}
#else
/**** Interface to HPM (CRAY SV2, XD1 and XT3) ****/
static int *hpm_tid_init = NULL;
static double cycles = 0;
#define MCYCLES (cycles * 1e-6)
#define TEST_PM_ERROR(name, rc) \
if (rc != 0) { \
fprintf(stderr,"PM_ERROR(tid#%d, pthread_self()=%d): rc=%d at %s(), line=%d, file=%s\n",\
tid,pthread_self(),rc,name,__LINE__,__FILE__); \
pm_error((char *)name, rc); \
spin(tid); \
RAISE(SIGABRT); \
}
static void
init_hpm(int tid)
{
const char *name = "init_hpm";
int rc;
cycles = irtc_rate_();
}
static void
stop_only_hpm(int tid, drhook_key_t *pstop)
{
const char *name = "stop_only_hpm";
int i, rc;
if (!hpm_tid_init || !hpm_tid_init[tid-1]) init_hpm(tid);
if (pstop && !pstop->counter_stopped) {
if (pstop && pstop->counter_in && !pstop->counter_stopped) {
#if defined(DT_FLOP)
pstop->counter_sum[0] += ((long long int) flop_() - pstop->counter_in[0]);
#if defined(SV2)
pstop->counter_sum[ENTRY_4] += (_rtc() - pstop->counter_in[ENTRY_4]);
#else
pstop->counter_sum[ENTRY_4] += (irtc_() - pstop->counter_in[ENTRY_4]);
#endif
#endif
pstop->counter_stopped = 1;
}
}
}
static void
stopstart_hpm(int tid, drhook_key_t *pstop, drhook_key_t *pstart)
{
const char *name = "stopstart_hpm";
int i, rc;
if (!hpm_tid_init || !hpm_tid_init[tid-1]) init_hpm(tid);
if (pstop && pstop->counter_in && !pstop->counter_stopped) {
#if defined(DT_FLOP)
pstop->counter_sum[0] += ((long long int) flop_() - pstop->counter_in[0]);
#if defined(SV2)
pstop->counter_sum[ENTRY_4] += (_rtc() - pstop->counter_in[ENTRY_4]);
#else
pstop->counter_sum[ENTRY_4] += (irtc_() - pstop->counter_in[ENTRY_4]);
#endif
#endif
pstop->counter_stopped = 1;
}
if (pstart) {
if (!pstart->counter_in ) pstart->counter_in = calloc_drhook(MAX_COUNTERS, sizeof(*pstart->counter_in ));
if (!pstart->counter_sum) pstart->counter_sum = calloc_drhook(MAX_COUNTERS, sizeof(*pstart->counter_sum));
#if defined(DT_FLOP)
pstart->counter_in[0] = (long long int) flop_();
#if defined(SV2)
pstart->counter_in[ENTRY_4] = _rtc();
#else
pstart->counter_in[ENTRY_4] = irtc_();
#endif
#endif
pstart->counter_stopped = 0;
}
}
#endif /*Interface to RS6K and SV2, XD1, XT3 */
static double
mflops_hpm(const drhook_key_t *keyptr)
{
double mflops = 0;
if (keyptr && keyptr->counter_sum && keyptr->counter_sum[ENTRY_4] > 0) {
long long int sum = 0;
#if defined(DT_FLOP)
sum = keyptr->counter_sum[0];
#elif defined(PMAPI_P7)
/* IBM Power 7 specific */
if(hpm_grp == 150) {
sum = 2 * keyptr->counter_sum[2] + keyptr->counter_sum[3];
}
if(hpm_grp == 141) {
sum = 2 * keyptr->counter_sum[0] + 4 * keyptr->counter_sum[1] + 2 * keyptr->counter_sum[3];
}
#elif defined(PMAPI_P6)
/* IBM Power 6 specific */
sum = keyptr->counter_sum[0] + 2 * keyptr->counter_sum[1];
#elif defined(PMAPI_P5_PLUS)
/* IBM Power 5+ specific */
sum = 2 * keyptr->counter_sum[1] + keyptr->counter_sum[3];
#else
sum = keyptr->counter_sum[1] + keyptr->counter_sum[2] + keyptr->counter_sum[3] - keyptr->counter_sum[5];
#endif
if (sum > 0)
mflops = (sum * MCYCLES)/keyptr->counter_sum[ENTRY_4];
}
return mflops;
}
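/*
   Note on the formula above (illustrative, assuming counter_sum[ENTRY_4]
   accumulates clock ticks at the rate held in `cycles`, as arranged in
   init_hpm): since MCYCLES = cycles * 1e-6,

     (sum * MCYCLES) / counter_sum[ENTRY_4]
       = sum * (cycles * 1e-6) / ticks
       = (sum / seconds_elapsed) * 1e-6

   i.e. the counted floating-point operations per second expressed in MFlops.
*/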
static double
mips_hpm(const drhook_key_t *keyptr)
{
double mipsrate = 0;
#if defined(DT_FLOP)
mipsrate = 0;
#else
if (keyptr && keyptr->counter_sum && keyptr->counter_sum[ENTRY_4] > 0) {
mipsrate = (keyptr->counter_sum[ENTRY_6] * MCYCLES)/keyptr->counter_sum[ENTRY_4];
}
#endif
return mipsrate;
}
static double
divpc_hpm(const drhook_key_t *keyptr)
{
double divpc = 0;
#if defined(DT_FLOP)
divpc = 0;
#else
if (keyptr && keyptr->counter_sum) {
long long int sum = 0;
#if defined(PMAPI_P7)
/* IBM Power 7 specific */
if(hpm_grp == 150) {
sum = 2 * keyptr->counter_sum[2] + keyptr->counter_sum[3];
if (sum > 0) divpc = (keyptr->counter_sum[0]*100.0)/sum;
}
if(hpm_grp == 141) {
sum = 2 * keyptr->counter_sum[0] + 4 * keyptr->counter_sum[1] + 2 * keyptr->counter_sum[3];
if (sum > 0) divpc = (keyptr->counter_sum[1]*100.0)/sum;
}
#elif defined(PMAPI_P6)
/* IBM Power 6 specific */
sum = keyptr->counter_sum[0] + 2 * keyptr->counter_sum[1];
if (sum > 0) divpc = (keyptr->counter_sum[2]*100.0)/sum;
#elif defined(PMAPI_P5_PLUS)
/* IBM Power 5+ specific */
sum = 2 * keyptr->counter_sum[1] + keyptr->counter_sum[3];
if (sum > 0) divpc = (keyptr->counter_sum[0]*100.0)/sum;
#else
sum = keyptr->counter_sum[1] + keyptr->counter_sum[2] + keyptr->counter_sum[3] - keyptr->counter_sum[5];
if (sum > 0) divpc = (keyptr->counter_sum[0]*100.0)/sum;
#endif
}
#endif
return divpc;
}
static double
mflop_count(const drhook_key_t *keyptr)
{
double sum = 0;
if (keyptr && keyptr->counter_sum && keyptr->counter_sum[ENTRY_4] > 0) {
#if defined(DT_FLOP)
sum = (keyptr->counter_sum[0]) * 1e-6;
#elif defined(PMAPI_P7)
/* IBM Power 7 specific */
if(hpm_grp == 150) {
sum = (2 * keyptr->counter_sum[2] + keyptr->counter_sum[3]) * 1e-6;
}
if(hpm_grp == 141) {
sum = (2 * keyptr->counter_sum[0] + 4 * keyptr->counter_sum[1] + 2 * keyptr->counter_sum[3]) * 1e-6;
}
#elif defined(PMAPI_P6)
/* IBM Power 6 specific */
sum = (keyptr->counter_sum[0] + 2 * keyptr->counter_sum[1]) * 1e-6;
#elif defined(PMAPI_P5_PLUS)
/* IBM Power 5+ specific */
sum = (2 * keyptr->counter_sum[1] + keyptr->counter_sum[3]) * 1e-6;
#else
sum = (keyptr->counter_sum[1] + keyptr->counter_sum[2] + keyptr->counter_sum[3] - keyptr->counter_sum[5]) * 1e-6;
#endif
if (sum < 0) sum = 0;
}
return sum;
}
static double
mip_count(const drhook_key_t *keyptr)
{
double sum = 0;
#if defined(DT_FLOP)
sum = 0;
#else
if (keyptr && keyptr->counter_sum && keyptr->counter_sum[ENTRY_4] > 0) {
sum = keyptr->counter_sum[ENTRY_6] * 1e-6;
}
#endif
return sum;
}
#endif /* HPM */
/*
this is result of moving some code from libodb.a
(odb/aux/util_ccode.c) for use by libifsaux.a
directly ; simplifies linking sequences.
*/
#include <stdio.h>
#include <string.h>
/* #include <malloc.h> */
#include <stdlib.h>
#include <signal.h>
#define FORTRAN_CALL
#if defined(CRAY) && !defined(SV2)
#define util_cputime_ UTIL_CPUTIME
#define util_walltime_ UTIL_WALLTIME
#endif
/* Portable CPU-timer (User + Sys) ; also WALL CLOCK-timer */
#include <unistd.h>
#include <sys/types.h>
#include <sys/times.h>
#undef MIN
#undef MAX
#include <sys/param.h>
#include <sys/time.h>
#if !defined(VPP)
FORTRAN_CALL
double util_walltime_()
{
static double time_init = -1;
double time_in_secs;
#if !defined(CRAYXT)
struct timeval tbuf;
if (gettimeofday(&tbuf,NULL) == -1) perror("UTIL_WALLTIME");
if (time_init == -1) time_init =
(double) tbuf.tv_sec + (tbuf.tv_usec / 1000000.0);
time_in_secs =
(double) tbuf.tv_sec + (tbuf.tv_usec / 1000000.0) - time_init;
#else
if (time_init == -1) time_init = dclock();
time_in_secs = dclock() - time_init;
#endif
return time_in_secs;
}
#if defined(CRAYXT)
/* Cray XT3/XT4 with catamount microkernel */
FORTRAN_CALL
double util_cputime_()
{
return util_walltime_(); /* In absence of anything better */
}
#else
extern clock_t times (struct tms *buffer);
FORTRAN_CALL
double util_cputime_()
{
struct tms tbuf;
static int first_time = 1;
static double clock_ticks = 0;
(void) times(&tbuf);
if (first_time) {
clock_ticks = (double) sysconf(_SC_CLK_TCK);
first_time = 0;
}
return (tbuf.tms_utime + tbuf.tms_stime +
tbuf.tms_cutime + tbuf.tms_cstime) / clock_ticks;
}
#endif
#else
/* VPP */
FORTRAN_CALL
double util_walltime_()
{
double w, time_in_secs;
static double wallref = 0;
extern FORTRAN_CALL gettod_(double *);
if (wallref == 0) gettod_(&wallref);
gettod_(&w);
time_in_secs = (w - wallref) * 0.000001;
return time_in_secs;
}
#endif
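/*
   Usage sketch (illustrative only): both routines return seconds as doubles,
   util_walltime_() relative to its first call and util_cputime_() as the
   accumulated user+system CPU time of the process and its reaped children
   (on most of the platforms above), so a section can be timed like this:

     double w0 = util_walltime_(), c0 = util_cputime_();
     ... section of interest ...
     printf("wall=%.3f s cpu=%.3f s\n",
            util_walltime_() - w0, util_cputime_() - c0);
*/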
#ifdef VPP
#include <sys/types.h>
#include <sys/param.h>
#include <sys/signal.h>
#include <sys/fault.h>
#include <sys/syscall.h>
#include <sys/procfs.h>
#include <sys/proc.h>
#include <fcntl.h>
static int fujitsu_getrusage(int who, struct rusage *rusage)
{
int rc = -1;
if (rusage) rusage->ru_maxrss = 0;
if (who == RUSAGE_SELF && rusage) {
static int maxrss = 0;
static int oldpid = -1;
static char procfile[20] = "";
static char *pf = NULL;
/* static prpsinfo_t ps; */
static proc_t proc;
int pid = getpid();
static int fildes = -1;
unsigned int size;
if (oldpid != pid) {
oldpid = pid;
maxrss = 0;
pf = NULL;
}
if (!pf) {
sprintf(procfile,"/proc/%d",pid);
pf = procfile;
fildes = open(procfile, O_RDONLY);
}
if (fildes == -1) return rc;
/*
if (ioctl(fildes, PIOCPSINFO, &ps) == -1) {
perror("ioctl@fujitsu_getrusage(PIOCPSINFO)");
return rc;
}
*/
if (ioctl(fildes, PIOCGETPR, &proc) == -1) {
perror("ioctl@fujitsu_getrusage(PIOCGETPR)");
return rc;
}
size = /* ps.pr_usevpmem + */ proc.p_brksize + proc.p_stksize;
if (size > maxrss) maxrss = size;
rusage->ru_maxrss = maxrss;
/* close(fildes); */
rc = 0;
}
return rc;
}
#endif /* VPP */
FORTRAN_CALL
int util_ihpstat_(int *option)
{
int ret_value = 0;
#if defined(SGI) || defined(VPP)
if (*option == 1) {
struct rusage rusage;
#ifdef SGI
int pagesize = 1024;
getrusage(0, &rusage);
#endif
#ifdef VPP
int pagesize = 1; /* getpagesize() */
fujitsu_getrusage(0, &rusage);
#endif
#if defined(SV2)
int pagesize = getpagesize();
getrusage(0, &rusage);
#endif
#if defined(XT3)
int pagesize = getpagesize();
getrusage(0, &rusage);
#endif
#if defined(XD1)
int pagesize = getpagesize();
getrusage(0, &rusage);
#endif
ret_value = (rusage.ru_maxrss * pagesize + 7) / 8; /* In 8 byte words */
}
#endif /* SGI or VPP */
return ret_value;
}
#ifndef __timer_t_defined
static void set_timed_kill()
{
// Definition of timer_t, timer_create, timer_settime
// is a POSIX extension, not available on e.g. Darwin
}
#else
static void set_timed_kill()
{
#if !defined MACOSX
if (drhook_timed_kill) {
const char delim[] = ", \t/";
char *p, *s = strdup_drhook(drhook_timed_kill);
p = strtok(s,delim);
while (p) {
int target_myproc, target_omptid, target_sig;
double start_time;
int nelems = sscanf(p,"%d:%d:%d:%lf",
&target_myproc, &target_omptid, &target_sig, &start_time);
int ntids = 1;
coml_get_max_threads_(&ntids);
if (nelems == 4 &&
(target_myproc == myproc || target_myproc == -1) &&
(target_omptid == -1 || (target_omptid >= 1 && target_omptid <= ntids)) &&
(target_sig >= 1 && target_sig <= NSIG) &&
start_time > 0) {
#if 1
{
extern void run_fortran_omp_parallel_ipfipipipdpstr_(const int *,
void (*func)(const int *, const int *, const int *, const double *, const char *, int len),
const int *, const int *, const int *, const double *, const char *, int len);
run_fortran_omp_parallel_ipfipipipdpstr_(&ntids,set_killer_timer,
&ntids,&target_omptid,&target_sig,&start_time,p,strlen(p));
}
#else
#pragma omp parallel num_threads(ntids)
{
set_killer_timer(&ntids,&target_omptid,&target_sig,&start_time,p,strlen(p));
}
#endif
}
p = strtok(NULL,delim);
}
free_drhook(s);
}
#endif
}
#endif
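/*
   Illustrative example (not from the original source) of the string parsed
   above: with entries separated by commas, blanks, tabs or '/', a value such
   as "2:-1:6:30.5" reads as <myproc>:<omptid>:<signal>:<start_time>, i.e. on
   MPI task 2 arm a timer on every OpenMP thread (-1) that raises signal 6
   after 30.5 seconds.
*/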
|
GB_binop__rminus_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int16)
// A*D function (colscale): GB (_AxD__rminus_int16)
// D*A function (rowscale): GB (_DxB__rminus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int16)
// C=scalar+B GB (_bind1st__rminus_int16)
// C=scalar+B' GB (_bind1st_tran__rminus_int16)
// C=A+scalar GB (_bind2nd__rminus_int16)
// C=A'+scalar GB (_bind2nd_tran__rminus_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (bij - aij)
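// A quick worked example of the operator above: with aij = 3 and bij = 10,
// rminus yields cij = 10 - 3 = 7, i.e. ordinary subtraction with its
// operands swapped.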
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT16 || GxB_NO_RMINUS_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
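// Note (for clarity, not part of the generated text): for rminus the two
// bindings differ. _bind1st above fixes the first operand x and computes
// Cx [p] = Bx [p] - x, whereas _bind2nd fixes the second operand y and
// computes Cx [p] = y - Ax [p].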
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|