source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include <boost/algorithm/string/predicate.hpp>
#include <cstring>
#include <memory>
#include <random>
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include "Constants.h"
#include "common/Schema.h"
#include "query/SearchOnIndex.h"
#include "segcore/SegmentGrowingImpl.h"
#include "segcore/SegmentSealedImpl.h"
using boost::algorithm::starts_with;
namespace milvus::segcore {
struct GeneratedData {
std::vector<uint8_t> rows_;
std::vector<aligned_vector<uint8_t>> cols_;
std::vector<idx_t> row_ids_;
std::vector<Timestamp> timestamps_;
RowBasedRawData raw_;
template <typename T>
auto
get_col(int index) const {
auto& target = cols_.at(index);
std::vector<T> ret(target.size() / sizeof(T));
memcpy(ret.data(), target.data(), target.size());
return ret;
}
template <typename T>
auto
get_mutable_col(int index) {
auto& target = cols_.at(index);
assert(target.size() == row_ids_.size() * sizeof(T));
auto ptr = reinterpret_cast<T*>(target.data());
return ptr;
}
private:
GeneratedData() = default;
friend GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed, uint64_t ts_offset);
void
generate_rows(int64_t N, SchemaPtr schema);
};
inline void
GeneratedData::generate_rows(int64_t N, SchemaPtr schema) {
std::vector<int> offset_infos(schema->size() + 1, 0);
auto sizeof_infos = schema->get_sizeof_infos();
std::partial_sum(sizeof_infos.begin(), sizeof_infos.end(), offset_infos.begin() + 1);
int64_t len_per_row = offset_infos.back();
assert(len_per_row == schema->get_total_sizeof());
// change column-based data to row-based data
std::vector<uint8_t> result(len_per_row * N);
for (int index = 0; index < N; ++index) {
for (int fid = 0; fid < schema->size(); ++fid) {
auto len = sizeof_infos[fid];
auto offset = offset_infos[fid];
auto src = cols_[fid].data() + index * len;
auto dst = result.data() + index * len_per_row + offset;
memcpy(dst, src, len);
}
}
rows_ = std::move(result);
raw_.raw_data = rows_.data();
raw_.sizeof_per_row = schema->get_total_sizeof();
raw_.count = N;
}
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0) {
using std::vector;
std::vector<aligned_vector<uint8_t>> cols;
std::default_random_engine er(seed);
std::normal_distribution<> distr(0, 1);
int offset = 0;
auto insert_cols = [&cols](auto& data) {
using T = std::remove_reference_t<decltype(data)>;
auto len = sizeof(typename T::value_type) * data.size();
auto ptr = aligned_vector<uint8_t>(len);
memcpy(ptr.data(), data.data(), len);
cols.emplace_back(std::move(ptr));
};
for (auto& field : schema->get_fields()) {
switch (field.get_data_type()) {
case engine::DataType::VECTOR_FLOAT: {
auto dim = field.get_dim();
vector<float> final(dim * N);
bool is_ip = starts_with(field.get_name().get(), "normalized");
#pragma omp parallel for
for (int n = 0; n < N; ++n) {
vector<float> data(dim);
float sum = 0;
std::default_random_engine er2(seed + n);
std::normal_distribution<> distr2(0, 1);
for (auto& x : data) {
x = distr2(er2) + offset;
sum += x * x;
}
if (is_ip) {
sum = sqrt(sum);
for (auto& x : data) {
x /= sum;
}
}
std::copy(data.begin(), data.end(), final.begin() + dim * n);
}
insert_cols(final);
break;
}
case engine::DataType::VECTOR_BINARY: {
auto dim = field.get_dim();
Assert(dim % 8 == 0);
vector<uint8_t> data(dim / 8 * N);
for (auto& x : data) {
x = er();
}
insert_cols(data);
break;
}
case engine::DataType::INT64: {
vector<int64_t> data(N);
// begin with counter
if (starts_with(field.get_name().get(), "counter")) {
int64_t index = 0;
for (auto& x : data) {
x = index++;
}
} else {
int i = 0;
for (auto& x : data) {
x = er() % (2 * N);
x = i;
i++;
}
}
insert_cols(data);
break;
}
case engine::DataType::INT32: {
vector<int> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::INT16: {
vector<int16_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::INT8: {
vector<int8_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::FLOAT: {
vector<float> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data);
break;
}
case engine::DataType::DOUBLE: {
vector<double> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data);
break;
}
default: {
throw std::runtime_error("unimplemented");
}
}
++offset;
}
GeneratedData res;
res.cols_ = std::move(cols);
for (int i = 0; i < N; ++i) {
res.row_ids_.push_back(i);
res.timestamps_.push_back(i + ts_offset);
}
// std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er);
res.generate_rows(N, schema);
return res;
}
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
std::normal_distribution<double> dis(0, 1);
std::default_random_engine e(seed);
for (int i = 0; i < num_queries; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(dis(e));
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size() * sizeof(float));
}
return raw_group;
}
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
int64_t src_index = 0;
for (int i = 0; i < num_queries; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(src[src_index++]);
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size() * sizeof(float));
}
return raw_group;
}
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
assert(dim % 8 == 0);
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::BinaryVector);
std::default_random_engine e(seed);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
for (int d = 0; d < dim / 8; ++d) {
vec.push_back(e());
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
assert(dim % 8 == 0);
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::BinaryVector);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
for (int d = 0; d < dim / 8; ++d) {
vec.push_back(*ptr);
++ptr;
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}
inline json
SearchResultToJson(const SearchResult& sr) {
int64_t num_queries = sr.num_queries_;
int64_t topk = sr.topk_;
std::vector<std::vector<std::string>> results;
for (int q = 0; q < num_queries; ++q) {
std::vector<std::string> result;
for (int k = 0; k < topk; ++k) {
int index = q * topk + k;
result.emplace_back(std::to_string(sr.ids_[index]) + "->" + std::to_string(sr.distances_[index]));
}
results.emplace_back(std::move(result));
}
return json{results};
};
inline void
SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) {
// TODO
auto row_count = dataset.row_ids_.size();
{
LoadFieldDataInfo info;
info.blob = dataset.row_ids_.data();
info.row_count = dataset.row_ids_.size();
info.field_id = 0; // field id for RowId
seg.LoadFieldData(info);
}
{
LoadFieldDataInfo info;
info.blob = dataset.timestamps_.data();
info.row_count = dataset.timestamps_.size();
info.field_id = 1;
seg.LoadFieldData(info);
}
int field_offset = 0;
for (auto& meta : seg.get_schema().get_fields()) {
LoadFieldDataInfo info;
info.field_id = meta.get_id().get();
info.row_count = row_count;
info.blob = dataset.cols_[field_offset].data();
seg.LoadFieldData(info);
++field_offset;
}
}
inline std::unique_ptr<SegmentSealed>
SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) {
auto segment = CreateSealedSegment(schema);
SealedLoader(dataset, *segment);
segment->LoadIndex(index_info);
return segment;
}
inline knowhere::VecIndexPtr
GenIndexing(int64_t N, int64_t dim, const float* vec) {
// {knowhere::IndexParams::nprobe, 10},
auto conf = knowhere::Config{{knowhere::meta::DIM, dim},
{knowhere::IndexParams::nlist, 1024},
{knowhere::Metric::TYPE, knowhere::Metric::L2},
{knowhere::meta::DEVICEID, 0}};
auto database = knowhere::GenDataset(N, dim, vec);
auto indexing = std::make_shared<knowhere::IVF>();
indexing->Train(database, conf);
indexing->AddWithoutIds(database, conf);
return indexing;
}
} // namespace milvus::segcore
|
kernels.c | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "common.h"
void cpu_stencilGPU(float c0, float c1, float *A0, float *Anext, const int nx,
const int ny, const int nz) {
int i, j, k;
int max = nz - 1;
#pragma omp target map(to : A0[ : nx *ny *nz]) \
map(tofrom : Anext[ : nx *ny *nz]) device(DEVICE_ID)
#pragma omp parallel for
for (k = 1; k < max; k++) {
for (j = 1; j < ny - 1; j++) {
for (i = 1; i < nx - 1; i++) {
Anext[Index3D(nx, ny, i, j, k)] = (A0[Index3D(nx, ny, i, j, k + 1)] +
A0[Index3D(nx, ny, i, j, k - 1)] +
A0[Index3D(nx, ny, i, j + 1, k)] +
A0[Index3D(nx, ny, i, j - 1, k)] +
A0[Index3D(nx, ny, i + 1, j, k)] +
A0[Index3D(nx, ny, i - 1, j, k)]) *
c1 -
A0[Index3D(nx, ny, i, j, k)] * c0;
}
}
}
}
void cpu_stencilCPU(float c0, float c1, float *A0, float *Anext, const int nx,
const int ny, const int nz) {
int i, j, k;
for (k = 1; k < nz - 1; k++) {
for (j = 1; j < ny - 1; j++) {
for (i = 1; i < nx - 1; i++) {
Anext[Index3D(nx, ny, i, j, k)] = (A0[Index3D(nx, ny, i, j, k + 1)] +
A0[Index3D(nx, ny, i, j, k - 1)] +
A0[Index3D(nx, ny, i, j + 1, k)] +
A0[Index3D(nx, ny, i, j - 1, k)] +
A0[Index3D(nx, ny, i + 1, j, k)] +
A0[Index3D(nx, ny, i - 1, j, k)]) *
c1 -
A0[Index3D(nx, ny, i, j, k)] * c0;
}
}
}
}
|
GB_binop__rdiv_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int64)
// A*D function (colscale): GB (_AxD__rdiv_int64)
// D*A function (rowscale): GB (_DxB__rdiv_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int64)
// C=scalar+B GB (_bind1st__rdiv_int64)
// C=scalar+B' GB (_bind1st_tran__rdiv_int64)
// C=A+scalar GB (_bind2nd__rdiv_int64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT64 || GxB_NO_RDIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (y, aij, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 64) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 64) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
host_function.c | #include <stdio.h>
#include <omp.h>
#pragma omp declare target
void hostrpc_fptr0(void* fun_ptr);
#pragma omp end declare target
// A host function will synchronously call from a device as a function pointer
void myfun() {
fprintf(stderr, " This is myfun writing to stderr \n");
}
int main()
{
int N = 10;
int a[N];
int b[N];
int i;
for (i=0; i<N; i++){
a[i]=0;
b[i]=i;
}
//void (*fun_ptr)(int) = &myfun;
void (*fun_ptr)() = &myfun;
printf("Testing myfun execution as a function pointer \n");
(*fun_ptr)();
printf("Testing myfun execution from device using hostrpc_fptr0\n");
#pragma omp target parallel for map(from: a[0:N]) map(to: b[0:N]) is_device_ptr(fun_ptr)
for (int j = 0; j< N; j++) {
a[j+1]=b[j];
hostrpc_fptr0(fun_ptr);
}
printf("Testing the host fallback of hostrpc_fptr0 \n");
hostrpc_fptr0(fun_ptr);
int rc = 0;
for (i=0; i<N; i++)
if (a[i] != b[i] ) {
rc++;
printf ("Wrong value: a[%d]=%d\n", i, a[i]);
}
if (!rc){
printf("Success\n");
return EXIT_SUCCESS;
} else{
printf("Failure\n");
return EXIT_FAILURE;
}
}
|
GB_is_diagonal.c | //------------------------------------------------------------------------------
// GB_is_diagonal: check if A is a diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Returns true if A is a square diagonal matrix, with all diagonal entries
// present. All pending tuples are ignored. Zombies are treated as entries.
#include "GB_mxm.h"
#include "GB_atomics.h"
bool GB_is_diagonal // true if A is diagonal
(
const GrB_Matrix A, // input matrix to examine
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (A != NULL) ;
ASSERT_MATRIX_OK (A, "A check diag", GB0) ;
//--------------------------------------------------------------------------
// trivial cases
//--------------------------------------------------------------------------
int64_t n = GB_NROWS (A) ;
int64_t ncols = GB_NCOLS (A) ;
if (n != ncols)
{
// A is rectangular
return (false) ;
}
int64_t anz = GB_NNZ (A) ;
int64_t nvec = A->nvec ;
if (n != anz || n != nvec)
{
// A must have exactly n entries in n vectors. A can be sparse or
// hypersparse. If hypersparse, all vectors must be present, so
// Ap has size n+1 whether sparse or hypersparse.
return (false) ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
// Break the work into lots of tasks so the early-exit can be exploited.
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (256 * nthreads) ;
ntasks = GB_IMIN (ntasks, n) ;
ntasks = GB_IMAX (ntasks, 1) ;
//--------------------------------------------------------------------------
// examine each vector of A
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ai = A->i ;
int diagonal = true ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// check for early exit
//----------------------------------------------------------------------
int diag = true ;
{
GB_ATOMIC_READ
diag = diagonal ;
}
if (!diag) continue ;
//----------------------------------------------------------------------
// check if vectors jstart:jend-1 are diagonal
//----------------------------------------------------------------------
int64_t jstart, jend ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
for (int64_t j = jstart ; diag && j < jend ; j++)
{
int64_t p = Ap [j] ;
int64_t ajnz = Ap [j+1] - p ;
if (ajnz != 1)
{
// A(:,j) must have exactly one entry
diag = false ;
}
int64_t i = Ai [p] ;
if (i != j)
{
// the single entry must be A(i,i)
diag = false ;
}
}
//----------------------------------------------------------------------
// early exit: tell all other tasks to halt
//----------------------------------------------------------------------
if (!diag)
{
GB_ATOMIC_WRITE
diagonal = false ;
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
if (diagonal) A->nvec_nonempty = n ;
return ((bool) diagonal) ;
}
|
GB_binop__pair_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint64)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint64_t
// A type: uint64_t
// A pattern? 1
// B type: uint64_t
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT64 || GxB_NO_PAIR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__pair_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pair_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pair_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pair_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
YAKL_mem_transfers.h |
#pragma once
// Included by YAKL.h
namespace yakl {
// Your one-stop shop for memory transfers to / from host / device
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_host_to_host(T1 *dst , T2 *src , index_t elems) {
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
}
inline void memcpy_host_to_host_void(void *dst , void *src , size_t bytes) {
memcpy( dst , src , bytes );
}
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_device_to_host(T1 *dst , T2 *src , index_t elems) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_device_to_host");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,elems*sizeof(T1),cudaMemcpyDeviceToHost,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,elems*sizeof(T1),hipMemcpyDeviceToHost,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, elems*sizeof(T1));
check_last_error();
#elif defined(YAKL_ARCH_OPENMP)
#pragma omp parallel for
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#else
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_device_to_host");
#endif
}
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_host_to_device(T1 *dst , T2 *src , index_t elems) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_host_to_device");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,elems*sizeof(T1),cudaMemcpyHostToDevice,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,elems*sizeof(T1),hipMemcpyHostToDevice,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, elems*sizeof(T1));
check_last_error();
#elif defined(YAKL_ARCH_OPENMP)
#pragma omp parallel for
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#else
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_host_to_device");
#endif
}
template <class T1, class T2,
typename std::enable_if< std::is_same< typename std::remove_cv<T1>::type ,
typename std::remove_cv<T2>::type >::value , int >::type = 0>
inline void memcpy_device_to_device(T1 *dst , T2 *src , index_t elems) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_device_to_device");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,elems*sizeof(T1),cudaMemcpyDeviceToDevice,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,elems*sizeof(T1),hipMemcpyDeviceToDevice,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, elems*sizeof(T1));
check_last_error();
#elif defined(YAKL_ARCH_OPENMP)
#pragma omp parallel for
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#else
for (index_t i=0; i<elems; i++) { dst[i] = src[i]; }
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_device_to_device");
#endif
}
inline void memcpy_device_to_device_void(void *dst , void *src , size_t bytes) {
#ifdef YAKL_AUTO_PROFILE
timer_start("YAKL_internal_memcpy_device_to_device");
#endif
#ifdef YAKL_ARCH_CUDA
cudaMemcpyAsync(dst,src,bytes,cudaMemcpyDeviceToDevice,0);
check_last_error();
#elif defined(YAKL_ARCH_HIP)
hipMemcpyAsync(dst,src,bytes,hipMemcpyDeviceToDevice,0);
check_last_error();
#elif defined (YAKL_ARCH_SYCL)
sycl_default_stream().memcpy(dst, src, bytes);
check_last_error();
#else
memcpy( dst , src , bytes );
#endif
#if defined(YAKL_AUTO_FENCE)
fence();
#endif
#ifdef YAKL_AUTO_PROFILE
timer_stop("YAKL_internal_memcpy_device_to_device");
#endif
}
}
|
test_core.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2015 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* RELIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with RELIC. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file
*
* Tests for configuration management.
*
* @ingroup test
*/
#include <stdio.h>
#include "relic.h"
#include "relic_test.h"
#if MULTI == PTHREAD
void *master(void *ptr) {
int *code = (int *)ptr;
core_init();
THROW(ERR_NO_MEMORY);
if (err_get_code() != STS_ERR) {
*code = STS_ERR;
} else {
*code = STS_OK;
}
core_clean();
return NULL;
}
void *tester(void *ptr) {
int *code = (int *)ptr;
core_init();
if (err_get_code() != STS_OK) {
*code = STS_ERR;
} else {
*code = STS_OK;
}
core_clean();
return NULL;
}
#endif
int main(void) {
int code = STS_ERR;
/* Initialize library with default configuration. */
if (core_init() != STS_OK) {
core_clean();
return 1;
}
util_banner("Tests for the CORE module:\n", 0);
TEST_ONCE("the library context is consistent") {
TEST_ASSERT(core_get() != NULL, end);
} TEST_END;
TEST_ONCE("switching the library context is correct") {
ctx_t new_ctx, *old_ctx;
/* Backup the old context. */
old_ctx = core_get();
/* Switch the library context. */
core_set(&new_ctx);
/* Reinitialize library with new context. */
core_init();
/* Run function to manipulate the library context. */
THROW(ERR_NO_MEMORY);
core_set(old_ctx);
TEST_ASSERT(err_get_code() == STS_OK, end);
core_set(&new_ctx);
TEST_ASSERT(err_get_code() == STS_ERR, end);
/* Now we need to finalize the new context. */
core_clean();
/* And restore the original context. */
core_set(old_ctx);
} TEST_END;
code = STS_OK;
#if MULTI == OPENMP
TEST_ONCE("library context is thread-safe") {
omp_set_num_threads(CORES);
#pragma omp parallel shared(code)
{
if (omp_get_thread_num() == 0) {
THROW(ERR_NO_MEMORY);
if (err_get_code() != STS_ERR) {
code = STS_ERR;
}
} else {
core_init();
if (err_get_code() != STS_OK) {
code = STS_ERR;
}
core_clean();
}
}
TEST_ASSERT(code == STS_OK, end);
} TEST_END;
#endif
#if MULTI == PTHREAD
TEST_ONCE("library context is thread-safe") {
pthread_t thread[CORES];
int result[CORES] = { STS_OK };
for (int i = 0; i < CORES; i++) {
if (i == 0) {
if (pthread_create(&(thread[0]), NULL, master, &(result[0]))) {
code = STS_ERR;
}
} else {
if (pthread_create(&(thread[i]), NULL, tester, &(result[i]))) {
code = STS_ERR;
}
}
if (result[i] != STS_OK) {
code = STS_ERR;
}
}
for (int i = 0; i < CORES; i++) {
if (pthread_join(thread[i], NULL)) {
code = STS_ERR;
}
}
TEST_ASSERT(code == STS_OK, end);
} TEST_END;
#endif
util_banner("All tests have passed.\n", 0);
end:
core_clean();
return code;
}
|
GB_unop__lgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lgamma_fp64_fp64)
// op(A') function: GB (_unop_tran__lgamma_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = lgamma (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = lgamma (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__lgamma_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = lgamma (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = lgamma (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__lgamma_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
HGEMM_gen.h | #include <iostream>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <cholUtils.h>
#ifndef HALIDE_ATTRIBUTE_ALIGN
#ifdef _MSC_VER
#define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x))
#else
#define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x)))
#endif
#endif
#ifndef BUFFER_T_DEFINED
#define BUFFER_T_DEFINED
#include <stdbool.h>
#include <stdint.h>
typedef struct buffer_t {
uint64_t dev;
uint8_t* host;
int32_t extent[4];
int32_t stride[4];
int32_t min[4];
int32_t elem_size;
HALIDE_ATTRIBUTE_ALIGN(1) bool host_dirty;
HALIDE_ATTRIBUTE_ALIGN(1) bool dev_dirty;
HALIDE_ATTRIBUTE_ALIGN(1) uint8_t _padding[10 - sizeof(void *)];
} buffer_t;
#endif
#define __user_context_ NULL
struct halide_filter_metadata_t;
extern "C" {
void *sympiler_malloc(void *ctx, size_t s){return(malloc(s));}
void sympiler_free(void *ctx, void *ptr){free(ptr);};
}
#ifdef _WIN32
float roundf(float);
double round(double);
#else
inline float asinh_f32(float x) {return asinhf(x);}
inline float acosh_f32(float x) {return acoshf(x);}
inline float atanh_f32(float x) {return atanhf(x);}
inline double asinh_f64(double x) {return asinh(x);}
inline double acosh_f64(double x) {return acosh(x);}
inline double atanh_f64(double x) {return atanh(x);}
#endif
inline float sqrt_f32(float x) {return sqrtf(x);}
inline float sin_f32(float x) {return sinf(x);}
inline float asin_f32(float x) {return asinf(x);}
inline float cos_f32(float x) {return cosf(x);}
inline float acos_f32(float x) {return acosf(x);}
inline float tan_f32(float x) {return tanf(x);}
inline float atan_f32(float x) {return atanf(x);}
inline float sinh_f32(float x) {return sinhf(x);}
inline float cosh_f32(float x) {return coshf(x);}
inline float tanh_f32(float x) {return tanhf(x);}
inline float hypot_f32(float x, float y) {return hypotf(x, y);}
inline float exp_f32(float x) {return expf(x);}
inline float log_f32(float x) {return logf(x);}
inline float pow_f32(float x, float y) {return powf(x, y);}
inline float floor_f32(float x) {return floorf(x);}
inline float ceil_f32(float x) {return ceilf(x);}
inline float round_f32(float x) {return roundf(x);}
inline double sqrt_f64(double x) {return sqrt(x);}
inline double sin_f64(double x) {return sin(x);}
inline double asin_f64(double x) {return asin(x);}
inline double cos_f64(double x) {return cos(x);}
inline double acos_f64(double x) {return acos(x);}
inline double tan_f64(double x) {return tan(x);}
inline double atan_f64(double x) {return atan(x);}
inline double sinh_f64(double x) {return sinh(x);}
inline double cosh_f64(double x) {return cosh(x);}
inline double tanh_f64(double x) {return tanh(x);}
inline double hypot_f64(double x, double y) {return hypot(x, y);}
inline double exp_f64(double x) {return exp(x);}
inline double log_f64(double x) {return log(x);}
inline double pow_f64(double x, double y) {return pow(x, y);}
inline double floor_f64(double x) {return floor(x);}
inline double ceil_f64(double x) {return ceil(x);}
inline double round_f64(double x) {return round(x);}
inline float nan_f32() {return NAN;}
inline float neg_inf_f32() {return -INFINITY;}
inline float inf_f32() {return INFINITY;}
inline bool is_nan_f32(float x) {return x != x;}
inline bool is_nan_f64(double x) {return x != x;}
inline float float_from_bits(uint32_t bits) {
union {
uint32_t as_uint;
float as_float;
} u;
u.as_uint = bits;
return u.as_float;
}
inline int64_t make_int64(int32_t hi, int32_t lo) {
return (((int64_t)hi) << 32) | (uint32_t)lo;
}
inline double make_float64(int32_t i0, int32_t i1) {
union {
int32_t as_int32[2];
double as_double;
} u;
u.as_int32[0] = i0;
u.as_int32[1] = i1;
return u.as_double;
}
template<typename A, typename B> A reinterpret(B b) {A a; memcpy(&a, &b, sizeof(a)); return a;}
double one [2]={1.0,0.}, zero [2]={0.,0.};
int sw = false, lb1 = 0, ub1 = 0;
double *cur; int info=0;
#ifdef __cplusplus
extern "C" {
#endif
int32_t HGEMM(double *D,
double *B, double *VT, uint64_t *Dptr, uint64_t *Bptr, int32_t *VTptr, int32_t *lchildren, int32_t *rchildren, int32_t *levelset, int32_t *idx, double *mrhs,
double *apres, int32_t nrhs, int32_t *Ddim, int32_t *wptr, int32_t *uptr, double *wskel, int32_t *wskeloffset, double *uskel, int32_t *uskeloffset, int32_t *lm,
int32_t *slen, int32_t *nblockSet, int32_t *nblocks, int32_t *npairx, int32_t *npairy, int32_t *fblockSet, int32_t *fblocks, int32_t *fpairx, int32_t *fpairy, int32_t *wpart,
int32_t *clevelset) {
#pragma omp parallel for
for (int i = 0; i < 256; i++)
{
int32_t _0 = i + 1;
for (int j = nblockSet[i]; j < nblockSet[_0]; j++)
{
int32_t _1 = j + 1;
for (int k = nblocks[j]; k < nblocks[_1]; k++)
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
Ddim[npairx[k]],nrhs,Ddim[npairy[k]],
float_from_bits(1065353216 /* 1 */), &D[Dptr[k]],
Ddim[npairx[k]], &mrhs[wptr[npairy[k]]], Ddim[npairy[k]], float_from_bits(1065353216 /* 1 */),
&apres[uptr[npairx[k]]], Ddim[npairx[k]]);
} // for k
} // for j
} // for i
for (int i = 0; i < 5; i++)
{
int32_t _2 = i + 1;
#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_2]; k++)
{
int32_t _3 = k + 1;
for (int j = wpart[k]; j < wpart[_3]; j++)
{
int32_t _4 = (int32_t)(4294967295);
bool _5 = lchildren[idx[j]] == _4;
if (_5)
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,Ddim[lm[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &mrhs[wptr[lm[idx[j]]]], Ddim[lm[idx[j]]], float_from_bits(0 /* 0 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
} // if _5
else
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[lchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &wskel[wskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]], float_from_bits(0 /* 0 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
int32_t _6 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _7 = _6 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[idx[j]],nrhs,slen[rchildren[idx[j]]],
float_from_bits(1065353216 /* 1 */), &VT[_7],
slen[idx[j]], &wskel[wskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]], float_from_bits(1065353216 /* 1 */),
&wskel[wskeloffset[idx[j]]], slen[idx[j]]);
} // if _5 else
} // for j
} // for k
} // for i
#pragma omp parallel for
for (int i = 0; i < 256; i++)
{
int32_t _8 = i + 1;
for (int j = fblockSet[i]; j < fblockSet[_8]; j++)
{
int32_t _9 = j + 1;
for (int k = fblocks[j]; k < fblocks[_9]; k++)
{
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
slen[fpairx[k]],nrhs,slen[fpairy[k]],
float_from_bits(1065353216 /* 1 */), &B[Bptr[k]],
slen[fpairx[k]], &wskel[wskeloffset[fpairy[k]]], slen[fpairy[k]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[fpairx[k]]], slen[fpairx[k]]);
} // for k
} // for j
} // for i
int32_t _10 = 0 - 1;
int32_t _11 = 5 - 1;
for (int i = _11; i > _10; i--)
{
int32_t _12 = i + 1;
#pragma omp parallel for
for (int k = clevelset[i]; k < clevelset[_12]; k++)
{
int32_t _13 = wpart[k] - 1;
int32_t _14 = k + 1;
int32_t _15 = wpart[_14] - 1;
for (int j = _15; j > _13; j--)
{
int32_t _16 = (int32_t)(4294967295);
bool _17 = lchildren[idx[j]] == _16;
if (_17)
{
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
Ddim[lm[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&apres[uptr[lm[idx[j]]]], Ddim[lm[idx[j]]]);
} // if _17
else
{
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[lchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[VTptr[idx[j]]],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[lchildren[idx[j]]]], slen[lchildren[idx[j]]]);
int32_t _18 = slen[idx[j]] * slen[lchildren[idx[j]]];
int32_t _19 = _18 + VTptr[idx[j]];
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
slen[rchildren[idx[j]]],nrhs,slen[idx[j]],
float_from_bits(1065353216 /* 1 */), &VT[_19],
slen[idx[j]], &uskel[uskeloffset[idx[j]]], slen[idx[j]], float_from_bits(1065353216 /* 1 */),
&uskel[uskeloffset[rchildren[idx[j]]]], slen[rchildren[idx[j]]]);
} // if _17 else
} // for j
} // for k
} // for i
return 0;
}
#ifdef __cplusplus
} // extern "C"
#endif
|
GB_unaryop__ainv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_int32
// op(A') function: GB_tran__ainv_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int32_int32
(
int32_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
level.c | //------------------------------------------------------------------------------------------------------------------------------
// 1. support of non-even distribution ?
//
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// #ifdef USE_MPI
// #include <mpi.h>
// #endif
#ifdef _OPENMP
#include <omp.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "level.h"
#include "operators.h"
//------------------------------------------------------------------------------------------------------------------------------
#define BOX_ALIGN_1ST_CELL 4096 // Align to TLB page...
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_UPCXX
extern hclib::upcxx::shared_array< hclib::upcxx::global_ptr<double>, 1 > upc_buf_info;
extern hclib::upcxx::shared_array< hclib::upcxx::global_ptr<box_type>, 1> upc_box_info;
extern hclib::upcxx::shared_array< int, 1> upc_int_info;
#endif
//------------------------------------------------------------------------------------------------------------------------------
void print_communicator(int printSendRecv, int rank, int level, communicator_type *comm){
int i;
printf("rank=%2d level=%d ",rank,level);
if(printSendRecv & 0x1){
printf("num_sends=%2d ",comm->num_sends);
printf("send_ranks=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_ranks[i]);printf("] ");
printf("send_sizes=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_sizes[i]);printf("] ");
printf("send_buffers=[ ");for(i=0;i<comm->num_sends;i++)printf("%08lx ",(uint64_t)comm->send_buffers[i]);printf("] ");
for(i=0;i<comm->num_blocks[0];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[0][i].dim.i,comm->blocks[0][i].dim.j,comm->blocks[0][i].dim.k,comm->blocks[0][i].read.i,comm->blocks[0][i].read.j,comm->blocks[0][i].read.k,comm->blocks[0][i].read.jStride,comm->blocks[0][i].read.kStride,comm->blocks[0][i].write.i,comm->blocks[0][i].write.j,comm->blocks[0][i].write.k,comm->blocks[0][i].write.jStride,comm->blocks[0][i].write.kStride);
printf("\n");
}
if(printSendRecv & 0x2){
for(i=0;i<comm->num_blocks[1];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[1][i].dim.i,comm->blocks[1][i].dim.j,comm->blocks[1][i].dim.k,comm->blocks[1][i].read.i,comm->blocks[1][i].read.j,comm->blocks[1][i].read.k,comm->blocks[1][i].read.jStride,comm->blocks[1][i].read.kStride,comm->blocks[1][i].write.i,comm->blocks[1][i].write.j,comm->blocks[1][i].write.k,comm->blocks[1][i].write.jStride,comm->blocks[1][i].write.kStride);
printf("\n");
}
if(printSendRecv & 0x4){
printf("num_recvs=%2d ",comm->num_recvs);
printf("recv_ranks=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_ranks[i]);printf("] ");
printf("recv_sizes=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_sizes[i]);printf("] ");
printf("recv_buffers=[ ");for(i=0;i<comm->num_recvs;i++)printf("%08lx ",(uint64_t)comm->recv_buffers[i]);printf("] ");
for(i=0;i<comm->num_blocks[2];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[2][i].dim.i,comm->blocks[2][i].dim.j,comm->blocks[2][i].dim.k,comm->blocks[2][i].read.i,comm->blocks[2][i].read.j,comm->blocks[2][i].read.k,comm->blocks[2][i].read.jStride,comm->blocks[2][i].read.kStride,comm->blocks[2][i].write.i,comm->blocks[2][i].write.j,comm->blocks[2][i].write.k,comm->blocks[2][i].write.jStride,comm->blocks[2][i].write.kStride);
// for(i=0;i<comm->num_blocks[2];i++)printf("[ %dx%dx%d pos %d from %d for %d]\n", comm->blocks[2][i].dim.i,comm->blocks[2][i].dim.j,comm->blocks[2][i].dim.k, i, comm->blocks[2][i].read.box, MYTHREAD);
printf("\n");
}
fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
typedef struct {
int sendRank;
int sendBoxID;
int sendBox;
int sendDir;
int recvRank;
int recvBoxID;
int recvBox;
} GZ_type;
int qsortGZ(const void *a, const void*b){
GZ_type *gza = (GZ_type*)a;
GZ_type *gzb = (GZ_type*)b;
// by convention, MPI buffers are first sorted by sendRank
if(gza->sendRank < gzb->sendRank)return(-1);
if(gza->sendRank > gzb->sendRank)return( 1);
// then by sendBoxID
if(gza->sendBoxID < gzb->sendBoxID)return(-1);
if(gza->sendBoxID > gzb->sendBoxID)return( 1);
// and finally by the direction sent
if(gza->sendDir < gzb->sendDir)return(-1);
if(gza->sendDir > gzb->sendDir)return( 1);
return(0);
}
int qsortInt(const void *a, const void *b){
int *ia = (int*)a;
int *ib = (int*)b;
if(*ia < *ib)return(-1);
if(*ia > *ib)return( 1);
return( 0);
}
//------------------------------------------------------------------------------------------------------------------------------
int create_box(box_type *box, int numVectors, int dim, int ghosts){
uint64_t memory_allocated = 0;
box->numVectors = numVectors;
box->dim = dim;
box->ghosts = ghosts;
#ifndef BOX_SIMD_ALIGNMENT
#define BOX_SIMD_ALIGNMENT 1 // allignment requirement for j+/-1
#endif
box->jStride = (dim+2*ghosts);while(box->jStride % BOX_SIMD_ALIGNMENT)box->jStride++; // pencil
#ifndef BOX_PLANE_PADDING
#define BOX_PLANE_PADDING 8 // scratch space to avoid unrolled loop cleanup
#endif
box->kStride = box->jStride*(dim+2*ghosts); // plane
if(box->jStride<BOX_PLANE_PADDING)box->kStride += (BOX_PLANE_PADDING-box->jStride); // allow the ghost zone to be clobbered...
while(box->kStride % BOX_SIMD_ALIGNMENT)box->kStride++;
#if 0
// pad each plane such that
// 1. it is greater than a multiple of the maximum unrolling (or vector length)
// 2. it carries the same alignement as the plane above/below. i.e. if ijk is aligned, ijkk+/-plane is aligned
//int MaxUnrolling = 8; // 2-way SIMD x unroll by 4 = 8/thread
int MaxUnrolling = 16; // 4-way SIMD x unroll by 4 = 16/thread
//int MaxUnrolling = 32; // 8-way SIMD x unroll by 4 = 32/thread
int paddingToAvoidStencilCleanup = 0;
if(box->jStride < (MaxUnrolling-1)){paddingToAvoidStencilCleanup = (MaxUnrolling-1)-(box->jStride);} // allows partial clobbering of ghost zone
//box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0xF) & ~0xF; // multiple of 128 bytes
box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0x7) & ~0x7; // multiple of 64 bytes (required for MIC)
//box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0x3) & ~0x3; // multiple of 32 bytes (required for AVX/QPX)
//box->kStride =( ((dim+2*ghosts)*box->jStride)+paddingToAvoidStencilCleanup+0x1) & ~0x1; // multiple of 16 bytes (required for SSE)
#endif
box->volume = (dim+2*ghosts)*box->kStride;
#ifdef USE_UPCXX
box->vectors = hclib::upcxx::allocate<hclib::upcxx::global_ptr<double> >(MYTHREAD, box->numVectors);
memory_allocated += box->numVectors*sizeof(hclib::upcxx::global_ptr<double>);
if((box->numVectors>0)&&(box->vectors==NULL)){fprintf(stderr,"malloc failed - create_box/box->vectors\n");exit(0);}
uint64_t malloc_size = box->volume*box->numVectors + BOX_ALIGN_1ST_CELL/sizeof(double);
box->vectors_base = hclib::upcxx::allocate<double>(MYTHREAD, malloc_size);
memory_allocated += malloc_size;
if((box->numVectors>0)&&(box->vectors_base==NULL)){fprintf(stderr,"malloc failed - create_box/box->vectors_base\n");exit(0);}
double * tmpbuf = (double *)box->vectors_base;
int num = 0;
memset(tmpbuf,0,malloc_size*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf
while( (uint64_t)(tmpbuf+box->ghosts*(1+box->jStride+box->kStride)) & (BOX_ALIGN_1ST_CELL-1) ){tmpbuf++; num++;} // allign first *non-ghost* zone element of first component to page boundary...
int c;
for(c=0;c<box->numVectors;c++){
box->vectors[c] = box->vectors_base + c*box->volume + num;
}
#else
// allocate an array of pointers to vectors...
box->vectors = (double **)malloc(box->numVectors*sizeof(double*));
memory_allocated += box->numVectors*sizeof(double*);
if((box->numVectors>0)&&(box->vectors==NULL)){fprintf(stderr,"malloc failed - create_box/box->vectors\n");exit(0);}
// allocate one aligned, double-precision array and divide it among vectors...
uint64_t malloc_size = box->volume*box->numVectors*sizeof(double) + BOX_ALIGN_1ST_CELL; // shift pointer by up to 1 TLB page...
box->vectors_base = (double*)malloc(malloc_size);
memory_allocated += malloc_size;
if((box->numVectors>0)&&(box->vectors_base==NULL)){fprintf(stderr,"malloc failed - create_box/box->vectors_base\n");exit(0);}
double * tmpbuf = box->vectors_base;
while( (uint64_t)(tmpbuf+box->ghosts*(1+box->jStride+box->kStride)) & (BOX_ALIGN_1ST_CELL-1) ){tmpbuf++;} // allign first *non-ghost* zone element of first component to page boundary...
memset(tmpbuf,0,box->volume*box->numVectors*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf
int c;for(c=0;c<box->numVectors;c++){box->vectors[c] = tmpbuf + c*box->volume;}
#endif // USE_UPCXX
// done... return the total amount of memory allocated...
return(memory_allocated);
}
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_UPCXX
void add_vectors_to_box(box_type *box, int numAdditionalVectors){
if(numAdditionalVectors<=0)return; // nothing to do
hclib::upcxx::global_ptr<double> old_bp = box->vectors_base;
hclib::upcxx::global_ptr<double> old_v0 = box->vectors[0];
hclib::upcxx::global_ptr<hclib::upcxx::global_ptr<double> > old_v = box->vectors;
box->numVectors+=numAdditionalVectors; //
box->vectors = hclib::upcxx::allocate<hclib::upcxx::global_ptr<double> >(MYTHREAD, box->numVectors);
if((box->numVectors>0)&&(box->vectors==NULL)){fprintf(stderr,"malloc failed - add_vectors_to_box/box->vectors\n");exit(0);}
// NOTE !!! realloc() cannot guarantee the same alignment... malloc, allign, copy...
uint64_t malloc_size = box->volume*box->numVectors + BOX_ALIGN_1ST_CELL/sizeof(double); // shift pointer by up to 1 TLB page...
box->vectors_base = hclib::upcxx::allocate<double>(MYTHREAD, malloc_size);
if((box->numVectors>0)&&(box->vectors_base==NULL)){fprintf(stderr,"malloc failed - add_vectors_to_box/box->vectors_base\n");exit(0);}
double * tmpbuf = (double *)box->vectors_base;
memset(tmpbuf,0,malloc_size*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf
int num = 0;
while( (uint64_t)(tmpbuf+box->ghosts*(1+box->jStride+box->kStride)) & (BOX_ALIGN_1ST_CELL-1) ){tmpbuf++; num++;} // allign first *non-ghost* zone element of first component to page boundary...
memcpy(tmpbuf,(double *)old_v0,box->volume*(box->numVectors-numAdditionalVectors)*sizeof(double));
// cout << "NEW BASE " << box->vectors_base + num << " local " << tmpbuf << " num " << num << endl;
int c;for(c=0;c<box->numVectors;c++){box->vectors[c] = box->vectors_base + c*box->volume + num;} // pointer arithmetic...
hclib::upcxx::deallocate(old_bp);
hclib::upcxx::deallocate(old_v );
}
#else
void add_vectors_to_box(box_type *box, int numAdditionalVectors){
if(numAdditionalVectors<=0)return; // nothing to do
double * old_bp = box->vectors_base; // save a pointer to the base pointer for subsequent free...
double * old_v0 = box->vectors[0]; // save a pointer to the old FP data...
double ** old_v = box->vectors; // save a pointer to the old array of pointers...
box->numVectors+=numAdditionalVectors; //
box->vectors = (double **)malloc(box->numVectors*sizeof(double*)); // new array of pointers vectors
if((box->numVectors>0)&&(box->vectors==NULL)){fprintf(stderr,"malloc failed - add_vectors_to_box/box->vectors\n");exit(0);}
// NOTE !!! realloc() cannot guarantee the same alignment... malloc, allign, copy...
uint64_t malloc_size = box->volume*box->numVectors*sizeof(double) + BOX_ALIGN_1ST_CELL; // shift pointer by up to 1 TLB page...
box->vectors_base = (double*)malloc(malloc_size);
if((box->numVectors>0)&&(box->vectors_base==NULL)){fprintf(stderr,"malloc failed - add_vectors_to_box/box->vectors_base\n");exit(0);}
double * tmpbuf = box->vectors_base;
while( (uint64_t)(tmpbuf+box->ghosts*(1+box->jStride+box->kStride)) & (BOX_ALIGN_1ST_CELL-1) ){tmpbuf++;} // allign first *non-ghost* zone element of first component to page boundary...
memset(tmpbuf,0,box->volume*box->numVectors*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf
memcpy(tmpbuf,old_v0,box->volume*(box->numVectors-numAdditionalVectors)*sizeof(double)); // copy any existant data over...
int c;for(c=0;c<box->numVectors;c++){box->vectors[c] = tmpbuf + c*box->volume;} // pointer arithmetic...
free(old_bp); // all vectors were created from one malloc + pointer arithmetic...
free(old_v ); // free the list of pointers...
}
#endif // USE_UPCXX
//------------------------------------------------------------------------------------------------------------------------------
void destroy_box(box_type *box){
hclib::upcxx::deallocate(box->vectors_base); // single allocate with pointer arithmetic...
hclib::upcxx::deallocate(box->vectors);
}
//------------------------------------------------------------------------------------------------------------------------------
// should implement a 3D hilbert curve on non pow2 (but cubical) domain sizes
//void decompose_level_hilbert(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){
//}
//------------------------------------------------------------------------------------------------------------------------------
void decompose_level_lex(int *rank_of_box, int idim, int jdim, int kdim, int ranks){
// simple lexicographical decomposition of the domain (i-j-k ordering)
int boxes = idim*jdim*kdim;
int i,j,k;
for(k=0;k<kdim;k++){
for(j=0;j<jdim;j++){
for(i=0;i<idim;i++){
int b = k*jdim*idim + j*idim + i;
rank_of_box[b] = ((uint64_t)ranks*(uint64_t)b)/(uint64_t)boxes; // ranks*b can be as larger than ranks^2... can over flow int
}}}
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void decompose_level_bisection_special(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){
// recursive bisection (or prime-section) of the domain
// can lead to imbalance unless the number of processes and number of boxes per process are chosen well
#define numPrimes 13
//int primes[numPrimes] = {41,37,31,29,23,19,17,13,11,7,5,3,2};
int primes[numPrimes] = {2,3,5,7,11,13,17,19,23,29,31,37,41};
int i,j,k,p,f,ff;
// base case, no further recursion...
if( (ranks==1)|| ((idim==1)&&(jdim==1)&&(kdim==1)) ){
for(i=ilo;i<ilo+idim;i++){
for(j=jlo;j<jlo+jdim;j++){
for(k=klo;k<klo+kdim;k++){
int b = i + j*jStride + k*kStride;
rank_of_box[b] = rank_lo;
}}}
return;
}
// special cases for perfectly matched problem sizes with numbers of processes (but not powers of 2)...
for(p=0;p<numPrimes;p++){
f=primes[p];
if( (kdim>=idim)&&(kdim>=jdim) ){if( (kdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+ff*kdim/f,idim,jdim,kdim/f,rank_lo+ff*ranks/f,ranks/f);return;}}
if( (jdim>=idim)&&(jdim>=kdim) ){if( (jdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+ff*jdim/f,klo,idim,jdim/f,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
if( (idim>=jdim)&&(idim>=kdim) ){if( (idim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+ff*idim/f,jlo,klo,idim/f,jdim,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
}
// try and bisect the domain in the i-dimension
if( (idim>=jdim)&&(idim>=kdim) ){
int dim0 = (int)(0.5*(double)idim + 0.50);
int dim1 = idim-dim0;
int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)idim );
int r1 = ranks-r0;
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo ,jlo,klo,dim0,jdim,kdim,rank_lo ,r0); // lo
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,rank_lo+r0,r1); // hi
return;
}
// try and bisect the domain in the j-dimension
if( (jdim>=idim)&&(jdim>=kdim) ){
int dim0 = (int)(0.5*(double)jdim + 0.50);
int dim1 = jdim-dim0;
int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)jdim );
int r1 = ranks-r0;
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo ,klo,idim,dim0,kdim,rank_lo ,r0); // lo
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,rank_lo+r0,r1); // hi
return;
}
// try and bisect the domain in the k-dimension
if( (kdim>=idim)&&(kdim>=jdim) ){
int dim0 = (int)(0.5*(double)kdim + 0.50);
int dim1 = kdim-dim0;
int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)kdim );
int r1 = ranks-r0;
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo ,idim,jdim,dim0,rank_lo ,r0); // lo
decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,rank_lo+r0,r1); // hi
return;
}
fprintf(stderr,"decompose_level_bisection_special failed !!!\n");exit(0);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void decompose_level_bisection(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
// base case...
if( (idim==1) && (jdim==1) && (kdim==1) ){
int b = ilo + jlo*jStride + klo*kStride;
rank_of_box[b] = ((uint64_t)ranks*(uint64_t)sfc_offset)/(uint64_t)sfc_max_length; // sfc_max_length is the precomputed maximum length
return;
}
// try and bisect the domain in the i-dimension
if( (idim>=jdim)&&(idim>=kdim) ){
int dim0 = (int)(0.5*(double)idim + 0.50);
int dim1 = idim-dim0;
int sfc_delta = dim0*jdim*kdim;
decompose_level_bisection(rank_of_box,jStride,kStride,ilo ,jlo,klo,dim0,jdim,kdim,ranks,sfc_offset ,sfc_max_length); // lo
decompose_level_bisection(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,ranks,sfc_offset+sfc_delta,sfc_max_length); // hi
return;
}
// try and bisect the domain in the j-dimension
if( (jdim>=idim)&&(jdim>=kdim) ){
int dim0 = (int)(0.5*(double)jdim + 0.50);
int dim1 = jdim-dim0;
int sfc_delta = idim*dim0*kdim;
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo ,klo,idim,dim0,kdim,ranks,sfc_offset ,sfc_max_length); // lo
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,ranks,sfc_offset+sfc_delta,sfc_max_length); // hi
return;
}
// try and bisect the domain in the k-dimension
if( (kdim>=idim)&&(kdim>=jdim) ){
int dim0 = (int)(0.5*(double)kdim + 0.50);
int dim1 = kdim-dim0;
int sfc_delta = idim*jdim*dim0;
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo ,idim,jdim,dim0,ranks,sfc_offset ,sfc_max_length); // lo
decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,ranks,sfc_offset+sfc_delta,sfc_max_length); // hi
return;
}
// failure...
fprintf(stderr,"decompose_level_bisection failed !!!\n");exit(0);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void print_decomposition(level_type *level){
if(level->my_rank!=0)return;
printf("\n");
int i,j,k;
int jStride = level->boxes_in.i;
int kStride = level->boxes_in.i*level->boxes_in.j;
for(k=level->boxes_in.k-1;k>=0;k--){ // (i,j,k)=(0,0,0) is bottom left corner
for(j=level->boxes_in.j-1;j>=0;j--){ // (i,j)=(0,0) is bottom left corner
for(i=0;i<j;i++)printf(" ");
for(i=0;i<level->boxes_in.i;i++){
int b = i + j*jStride + k*kStride;
printf("%4d ",level->rank_of_box[b]);
}printf("\n");
}printf("\n\n");
}
fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
#ifndef BLOCK_LIST_MIN_SIZE
#define BLOCK_LIST_MIN_SIZE 1000
#endif
void append_block_to_list(blockCopy_type ** blocks, int *allocated_blocks, int *num_blocks,
int dim_i, int dim_j, int dim_k,
#ifdef USE_UPCXX
hclib::upcxx::global_ptr<box_type> read_boxgp,
#endif
int read_box, double* read_ptr, int read_i, int read_j, int read_k, int read_jStride, int read_kStride, int read_scale,
#ifdef USE_UPCXX
hclib::upcxx::global_ptr<box_type> write_boxgp,
#endif
int write_box, double* write_ptr, int write_i, int write_j, int write_k, int write_jStride, int write_kStride, int write_scale,
int blockcopy_tile_i, int blockcopy_tile_j, int blockcopy_tile_k,
int subtype
){
int ii,jj,kk;
// Take a dim_j x dim_k iteration space and tile it into smaller faces of size blockcopy_tile_j x blockcopy_tile_k
// This increases the number of blockCopies in the ghost zone exchange and thereby increases the thread-level parallelism
// FIX... move from lexicographical ordering of tiles to recursive (e.g. z-mort)
// read_/write_scale are used to stride appropriately when read and write loop iterations spaces are different
// ghostZone: read_scale=1, write_scale=1
// interpolation: read_scale=1, write_scale=2
// restriction: read_scale=2, write_scale=1
// FIX... dim_i,j,k -> read_dim_i,j,k, write_dim_i,j,k
for(kk=0;kk<dim_k;kk+=blockcopy_tile_k){
for(jj=0;jj<dim_j;jj+=blockcopy_tile_j){
for(ii=0;ii<dim_i;ii+=blockcopy_tile_i){
int dim_k_mod = dim_k-kk;if(dim_k_mod>blockcopy_tile_k)dim_k_mod=blockcopy_tile_k;
int dim_j_mod = dim_j-jj;if(dim_j_mod>blockcopy_tile_j)dim_j_mod=blockcopy_tile_j;
int dim_i_mod = dim_i-ii;if(dim_i_mod>blockcopy_tile_i)dim_i_mod=blockcopy_tile_i;
if(*num_blocks >= *allocated_blocks){
int oldSize = *allocated_blocks;
if(*allocated_blocks == 0){*allocated_blocks=BLOCK_LIST_MIN_SIZE;*blocks=(blockCopy_type*) malloc( (*allocated_blocks)*sizeof(blockCopy_type));}
else{*allocated_blocks*=2; *blocks=(blockCopy_type*)realloc((void*)(*blocks),(*allocated_blocks)*sizeof(blockCopy_type));}
if(*blocks == NULL){fprintf(stderr,"realloc failed - append_block_to_list (%d -> %d)\n",oldSize,*allocated_blocks);exit(0);}
}
(*blocks)[*num_blocks].subtype = subtype;
(*blocks)[*num_blocks].dim.i = dim_i_mod;
(*blocks)[*num_blocks].dim.j = dim_j_mod;
(*blocks)[*num_blocks].dim.k = dim_k_mod;
#ifdef USE_UPCXX
(*blocks)[*num_blocks].read.boxgp = read_boxgp;
#endif
(*blocks)[*num_blocks].read.box = read_box;
(*blocks)[*num_blocks].read.ptr = read_ptr;
(*blocks)[*num_blocks].read.i = read_i + read_scale*ii;
(*blocks)[*num_blocks].read.j = read_j + read_scale*jj;
(*blocks)[*num_blocks].read.k = read_k + read_scale*kk;
(*blocks)[*num_blocks].read.jStride = read_jStride;
(*blocks)[*num_blocks].read.kStride = read_kStride;
#ifdef USE_UPCXX
(*blocks)[*num_blocks].write.boxgp = write_boxgp;
#endif
(*blocks)[*num_blocks].write.box = write_box;
(*blocks)[*num_blocks].write.ptr = write_ptr;
(*blocks)[*num_blocks].write.i = write_i + write_scale*ii;
(*blocks)[*num_blocks].write.j = write_j + write_scale*jj;
(*blocks)[*num_blocks].write.k = write_k + write_scale*kk;
(*blocks)[*num_blocks].write.jStride = write_jStride;
(*blocks)[*num_blocks].write.kStride = write_kStride;
(*num_blocks)++;
}}}
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that traverses the domain boundary intersecting with this process's boxes
// This includes faces, corners, and edges
void build_boundary_conditions(level_type *level, int justFaces){
level->boundary_condition.blocks[justFaces] = NULL; // default for periodic (i.e. no BC's)
level->boundary_condition.num_blocks[justFaces] = 0; // default for periodic (i.e. no BC's)
level->boundary_condition.allocated_blocks[justFaces] = 0; // default for periodic (i.e. no BC's)
if(level->boundary_condition.type == BC_PERIODIC)return;
int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0};
int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1};
int box, di,dj,dk;
for(box=0;box<level->num_my_boxes;box++){ // traverse my list of boxes...
for(dk=-1;dk<=1;dk++){ // for each box, examine its 26 neighbors...
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk;
// determine if this region (box's di,dj,dk ghost zone) is outside of the domain
int regionIsOutside=0;
int normal = 13; // normal effectively defines the normal vector to the domain for this region...
// this addition is necessary for linearly interpolated BC's as a box's corner is not necessarily a domain's corner
box_type* lbox = (box_type *)&(level->my_boxes[box]);
int myBox_i = lbox->low.i / level->box_dim;
int myBox_j = lbox->low.j / level->box_dim;
int myBox_k = lbox->low.k / level->box_dim;
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( neighborBox_i < 0 ){regionIsOutside=1;normal-=1;}
if( neighborBox_j < 0 ){regionIsOutside=1;normal-=3;}
if( neighborBox_k < 0 ){regionIsOutside=1;normal-=9;}
if( neighborBox_i >=level->boxes_in.i ){regionIsOutside=1;normal+=1;}
if( neighborBox_j >=level->boxes_in.j ){regionIsOutside=1;normal+=3;}
if( neighborBox_k >=level->boxes_in.k ){regionIsOutside=1;normal+=9;}
// calculate ghost zone region size and coordinates relative to the first non-ghost zone element (0,0,0)
int block_i=-1,block_j=-1,block_k=-1;
int dim_i=-1, dim_j=-1, dim_k=-1;
switch(di){
case -1:dim_i=level->box_ghosts;block_i=0-level->box_ghosts;break;
case 0:dim_i=level->box_dim; block_i=0; break;
case 1:dim_i=level->box_ghosts;block_i=0+level->box_dim; break;
}
switch(dj){
case -1:dim_j=level->box_ghosts;block_j=0-level->box_ghosts;break;
case 0:dim_j=level->box_dim; block_j=0; break;
case 1:dim_j=level->box_ghosts;block_j=0+level->box_dim; break;
}
switch(dk){
case -1:dim_k=level->box_ghosts;block_k=0-level->box_ghosts;break;
case 0:dim_k=level->box_dim; block_k=0; break;
case 1:dim_k=level->box_ghosts;block_k=0+level->box_dim; break;
}
if(justFaces && (faces[dir]==0))regionIsOutside=0;
if(regionIsOutside){
append_block_to_list(&(level->boundary_condition.blocks[justFaces]),&(level->boundary_condition.allocated_blocks[justFaces]),&(level->boundary_condition.num_blocks[justFaces]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
#ifdef USE_UPCXX
/* read.boxgp = */ &level->my_boxes[box],
#endif
/* read.box = */ box,
/* read.ptr = */ NULL,
/* read.i = */ block_i,
/* read.j = */ block_j,
/* read.k = */ block_k,
/* read.jStride = */ lbox->jStride,
/* read.kStride = */ lbox->kStride,
/* read.scale = */ 1,
#ifdef USE_UPCXX
/* write.boxgp = */ &level->my_boxes[box],
#endif
/* write.box = */ box,
/* write.ptr = */ NULL,
/* write.i = */ block_i,
/* write.j = */ block_j,
/* write.k = */ block_k,
/* write.jStride = */ lbox->jStride,
/* write.kStride = */ lbox->kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I < level->box_ghosts ? level->box_ghosts : BLOCKCOPY_TILE_I, // BC's may never tile smaller than the ghost zone depth
/* blockcopy_j = */ BLOCKCOPY_TILE_J < level->box_ghosts ? level->box_ghosts : BLOCKCOPY_TILE_J, // BC's may never tile smaller than the ghost zone depth
/* blockcopy_k = */ BLOCKCOPY_TILE_K < level->box_ghosts ? level->box_ghosts : BLOCKCOPY_TILE_K, // BC's may never tile smaller than the ghost zone depth
/* subtype = */ normal
);
}}}}}
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that packs data into MPI recv buffers, exchanges local data, and unpacks the MPI send buffers
// broadly speaking...
// 1. traverse my list of Boxes and create a list of ghosts that must be sent
// 2. create a list of neighbors to send to
// 3. allocate and populate the pack list and allocate the send buffers
// 4. allocate and populate the local exchange list
// 5. traverse my list of Boxes and create a list of ghosts that must be received
// 6. create a list of neighbors to receive from
// 7. allocate and populate the unpack list and allocate the recv buffers
//
// thus a ghost zone exchange is
// 1. prepost a Irecv for each MPI recv buffer (1 per neighbor)
// 2. traverse the pack list
// 3. post the Isends for each MPI send buffer (1 per neighbor)
// 4. traverse the local copy list
// 5. waitall
// 6. traverse the unpack list
//
// / 24 25 26 /
// / 21 22 23 / (k+1)
// / 18 19 20 /
//
// / 15 16 17 /
// / 12 13 14 / (k)
// / 9 10 11 /
//
// / 6 7 8 /
// / 3 4 5 / (k-1)
// / 0 1 2 /
//
void build_exchange_ghosts(level_type *level, int justFaces){
int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0};
int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1};
level->exchange_ghosts[justFaces].num_recvs = 0;
level->exchange_ghosts[justFaces].num_sends = 0;
level->exchange_ghosts[justFaces].blocks[0] = NULL;
level->exchange_ghosts[justFaces].blocks[1] = NULL;
level->exchange_ghosts[justFaces].blocks[2] = NULL;
level->exchange_ghosts[justFaces].blocks[3] = NULL;
level->exchange_ghosts[justFaces].num_blocks[0] = 0;
level->exchange_ghosts[justFaces].num_blocks[1] = 0;
level->exchange_ghosts[justFaces].num_blocks[2] = 0;
level->exchange_ghosts[justFaces].num_blocks[3] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[0] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[1] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[2] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[3] = 0;
int CommunicateThisDir[27];
int n;for(n=0;n<27;n++)CommunicateThisDir[n]=1;CommunicateThisDir[13]=0;
if(justFaces)for(n=0;n<27;n++)CommunicateThisDir[n]=faces[n];
int sendBox,recvBox;
int stage;
int _rank;
int ghost,numGhosts,numGhostsRemote;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// traverse my list of boxes and create a lists of neighboring boxes and neighboring ranks
GZ_type *ghostsToSend = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
int *sendRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
if(level->num_my_boxes>0){
if(ghostsToSend == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToSend\n");exit(0);}
if(sendRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/sendRanks \n");exit(0);}
}
numGhosts = 0;
numGhostsRemote = 0;
for(sendBox=0;sendBox<level->num_my_boxes;sendBox++){
int di,dj,dk;
for(dk=-1;dk<=1;dk++){
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
box_type* lbox = (box_type *)&(level->my_boxes[sendBox]);
int myBoxID = lbox->global_box_id;
int myBox_i = lbox->low.i / level->box_dim;
int myBox_j = lbox->low.j / level->box_dim;
int myBox_k = lbox->low.k / level->box_dim;
int neighborBoxID = -1;
if(level->boundary_condition.type == BC_PERIODIC){
int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}else{
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
(neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
(neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){ // i.e. the neighbor is a valid box
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}
}
if(neighborBoxID>=0){
if( level->rank_of_box[neighborBoxID] != -1 ){
ghostsToSend[numGhosts].sendRank = level->my_rank;
ghostsToSend[numGhosts].sendBoxID = myBoxID;
ghostsToSend[numGhosts].sendBox = sendBox;
ghostsToSend[numGhosts].sendDir = dir;
ghostsToSend[numGhosts].recvRank = level->rank_of_box[neighborBoxID];
ghostsToSend[numGhosts].recvBoxID = neighborBoxID;
ghostsToSend[numGhosts].recvBox = -1;
if( level->rank_of_box[neighborBoxID] != level->my_rank ){
sendRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
}else{
int recvBox=0;while(level->my_boxes[recvBox].get().global_box_id!=neighborBoxID)recvBox++; // search my list of boxes for the appropriate recvBox index
ghostsToSend[numGhosts].recvBox = recvBox;
}
numGhosts++;
}}
}}}}
}
// sort boxes by sendRank(==my rank) then by sendBoxID... ensures the sends and receive buffers are always sorted by sendBoxID...
qsort(ghostsToSend,numGhosts ,sizeof(GZ_type),qsortGZ );
// sort the lists of neighboring ranks and remove duplicates...
qsort(sendRanks ,numGhostsRemote,sizeof( int),qsortInt);
int numSendRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(sendRanks[ghost] != _rank){_rank=sendRanks[ghost];sendRanks[numSendRanks++]=sendRanks[ghost];}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// in a two-stage process, traverse the list of ghosts and allocate the pack/local lists as well as the MPI buffers, and then populate the pack/local lists
level->exchange_ghosts[justFaces].num_sends = numSendRanks;
level->exchange_ghosts[justFaces].send_ranks = (int*)malloc(numSendRanks*sizeof(int));
level->exchange_ghosts[justFaces].send_sizes = (int*)malloc(numSendRanks*sizeof(int));
level->exchange_ghosts[justFaces].send_buffers = (double**)malloc(numSendRanks*sizeof(double*));
#ifdef USE_UPCXX
level->exchange_ghosts[justFaces].global_send_buffers = (hclib::upcxx::global_ptr<double> *)malloc(numSendRanks*sizeof(hclib::upcxx::global_ptr<double>));
level->exchange_ghosts[justFaces].global_match_buffers = (hclib::upcxx::global_ptr<double> *)malloc(numSendRanks*sizeof(hclib::upcxx::global_ptr<double>));
level->exchange_ghosts[justFaces].send_match_pos = (int *)malloc(numSendRanks*sizeof(int));
level->exchange_ghosts[justFaces].match_rflag = (hclib::upcxx::global_ptr<int> *)hclib::upcxx::allocate< hclib::upcxx::global_ptr<int> >(level->my_rank, numSendRanks);
level->exchange_ghosts[justFaces].copy_e = new hclib::upcxx::event[numSendRanks];
level->exchange_ghosts[justFaces].data_e = new hclib::upcxx::event[numSendRanks];
#endif
if(numSendRanks>0){
if(level->exchange_ghosts[justFaces].send_ranks ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_ranks\n",justFaces);exit(0);}
if(level->exchange_ghosts[justFaces].send_sizes ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_sizes\n",justFaces);exit(0);}
if(level->exchange_ghosts[justFaces].send_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers\n",justFaces);exit(0);}
#ifdef USE_UPCXX
if(level->exchange_ghosts[justFaces].global_send_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].global_send_buffers\n",justFaces);exit(0);}
#endif
}
level->exchange_ghosts[justFaces].blocks[0] = NULL;
level->exchange_ghosts[justFaces].blocks[1] = NULL;
level->exchange_ghosts[justFaces].blocks[3] = NULL;
level->exchange_ghosts[justFaces].num_blocks[0] = 0;
level->exchange_ghosts[justFaces].num_blocks[1] = 0;
level->exchange_ghosts[justFaces].num_blocks[3] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[0] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[1] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[3] = 0;
for(stage=0;stage<=1;stage++){
// stage=0... traverse the list and calculate the buffer sizes
// stage=1... allocate MPI send buffers, traverse the list, and populate the unpack/local lists...
int neighbor;
for(neighbor=0;neighbor<numSendRanks;neighbor++){
if(stage==1){
#ifdef USE_UPCXX
level->exchange_ghosts[justFaces].global_send_buffers[neighbor] = hclib::upcxx::allocate<double>(MYTHREAD,level->exchange_ghosts[justFaces].send_sizes[neighbor]);
level->exchange_ghosts[justFaces].send_buffers[neighbor] = (double *)level->exchange_ghosts[justFaces].global_send_buffers[neighbor];
#else
level->exchange_ghosts[justFaces].send_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[justFaces].send_sizes[neighbor]*sizeof(double));
if(level->exchange_ghosts[justFaces].send_sizes[neighbor]>0)
if(level->exchange_ghosts[justFaces].send_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers[neighbor]\n",justFaces);exit(0);}
#endif
if (level->exchange_ghosts[justFaces].send_sizes[neighbor] > 0)
memset(level->exchange_ghosts[justFaces].send_buffers[neighbor], 0,level->exchange_ghosts[justFaces].send_sizes[neighbor]*sizeof(double));
}
level->exchange_ghosts[justFaces].send_ranks[neighbor]=sendRanks[neighbor];
level->exchange_ghosts[justFaces].send_sizes[neighbor]=0;
}
for(ghost=0;ghost<numGhosts;ghost++){
int dim_i=-1, dim_j=-1, dim_k=-1;
int send_i=-1,send_j=-1,send_k=-1;
int recv_i=-1,recv_j=-1,recv_k=-1;
// decode ghostsToSend[ghost].sendDir (direction sent) into di/dj/dk
int di = ((ghostsToSend[ghost].sendDir % 3) )-1;
int dj = ((ghostsToSend[ghost].sendDir % 9)/3)-1;
int dk = ((ghostsToSend[ghost].sendDir / 9) )-1;
switch(di){ // direction relative to sender
case -1:send_i=0; dim_i=level->box_ghosts;recv_i= level->box_dim; break;
case 0:send_i=0; dim_i=level->box_dim; recv_i=0; break;
case 1:send_i=level->box_dim-level->box_ghosts;dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
}
switch(dj){ // direction relative to sender
case -1:send_j=0; dim_j=level->box_ghosts;recv_j= level->box_dim; break;
case 0:send_j=0; dim_j=level->box_dim; recv_j=0; break;
case 1:send_j=level->box_dim-level->box_ghosts;dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
}
switch(dk){ // direction relative to sender
case -1:send_k=0; dim_k=level->box_ghosts;recv_k= level->box_dim; break;
case 0:send_k=0; dim_k=level->box_dim; recv_k=0; break;
case 1:send_k=level->box_dim-level->box_ghosts;dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
}
// determine if this ghost requires a pack or local exchange
int LocalExchange; // 0 = pack list, 1 = local exchange list
#ifdef USE_UPCXX
if (!hclib::upcxx::is_memory_shared_with(ghostsToSend[ghost].recvRank)) {
#else
if(ghostsToSend[ghost].recvRank != level->my_rank){
#endif
LocalExchange=0; // pack
neighbor=0;while(level->exchange_ghosts[justFaces].send_ranks[neighbor] != ghostsToSend[ghost].recvRank)neighbor++;
}else{
LocalExchange=1; // local
neighbor=-1;
}
if(stage==1){
#ifdef USE_UPCXX
hclib::upcxx::global_ptr<box_type> send_boxgp = level->addr_of_box[ghostsToSend[ghost].sendBoxID];
hclib::upcxx::global_ptr<box_type> recv_boxgp = level->addr_of_box[ghostsToSend[ghost].recvBoxID];
#endif
if(LocalExchange) {// append to the local exchange list...
#ifdef USE_UPCXX
box_type *lbox = (box_type *)recv_boxgp;
int jStride = lbox->jStride;
int kStride = lbox->kStride;
#else
int jStride = level->my_boxes[ghostsToSend[ghost].recvBox].jStride;
int kStride = level->my_boxes[ghostsToSend[ghost].recvBox].kStride;
#endif
if (level->my_rank == ghostsToSend[ghost].recvRank) {
append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[1]),&(level->exchange_ghosts[justFaces].allocated_blocks[1]),&(level->exchange_ghosts[justFaces].num_blocks[1]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
#ifdef USE_UPCXX
/* read.boxgp = */ send_boxgp,
#endif
/* read.box = */ ghostsToSend[ghost].sendBoxID,
/* read.ptr = */ NULL,
/* read.i = */ send_i,
/* read.j = */ send_j,
/* read.k = */ send_k,
/* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].get().jStride,
/* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].get().kStride,
/* read.scale = */ 1,
#ifdef USE_UPCXX
/* write.boxgp = */ recv_boxgp,
#endif
/* write.box = */ ghostsToSend[ghost].recvBoxID,
/* write.ptr = */ NULL,
/* write.i = */ recv_i,
/* write.j = */ recv_j,
/* write.k = */ recv_k,
/* write.jStride = */ jStride, //level->my_boxes[ghostsToSend[ghost].recvBox].get().jStride,
/* write.kStride = */ kStride, //level->my_boxes[ghostsToSend[ghost].recvBox].get().kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
); }
else {
append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[3]),&(level->exchange_ghosts[justFaces].allocated_blocks[3]),&(level->exchange_ghosts[justFaces].num_blocks[3]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
#ifdef USE_UPCXX
/* read.boxgp = */ send_boxgp,
#endif
/* read.box = */ ghostsToSend[ghost].sendBoxID,
/* read.ptr = */ NULL,
/* read.i = */ send_i,
/* read.j = */ send_j,
/* read.k = */ send_k,
/* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].get().jStride,
/* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].get().kStride,
/* read.scale = */ 1,
#ifdef USE_UPCXX
/* write.boxgp = */ recv_boxgp,
#endif
/* write.box = */ ghostsToSend[ghost].recvBoxID,
/* write.ptr = */ NULL,
/* write.i = */ recv_i,
/* write.j = */ recv_j,
/* write.k = */ recv_k,
/* write.jStride = */ jStride, //level->my_boxes[ghostsToSend[ghost].recvBox].get().jStride,
/* write.kStride = */ kStride, //level->my_boxes[ghostsToSend[ghost].recvBox].get().kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);
} // if (level->my_rank = rece_rank)
}
else // append to the MPI pack list...
append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[0]),&(level->exchange_ghosts[justFaces].allocated_blocks[0]),&(level->exchange_ghosts[justFaces].num_blocks[0]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
#ifdef USE_UPCXX
/* read.boxgp = */ send_boxgp,
#endif
/* read.box = */ ghostsToSend[ghost].sendBoxID,
/* read.ptr = */ NULL,
/* read.i = */ send_i,
/* read.j = */ send_j,
/* read.k = */ send_k,
/* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].get().jStride,
/* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].get().kStride,
/* read.scale = */ 1,
#ifdef USE_UPCXX
/* write.boxgp = */ recv_boxgp,
#endif
/* write.box = */ -1,
/* write.ptr = */ level->exchange_ghosts[justFaces].send_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
/* write.i = */ level->exchange_ghosts[justFaces].send_sizes[neighbor], // current offset in the MPI send buffer
/* write.j = */ 0,
/* write.k = */ 0,
/* write.jStride = */ dim_i, // contiguous block
/* write.kStride = */ dim_i*dim_j, // contiguous block
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);
}
if(neighbor>=0)level->exchange_ghosts[justFaces].send_sizes[neighbor]+=dim_i*dim_j*dim_k;
} // ghost for-loop
} // stage for-loop
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// free temporary storage...
free(ghostsToSend);
free(sendRanks);
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// traverse my list of boxes and create a lists of neighboring boxes and neighboring ranks
GZ_type *ghostsToRecv = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
int *recvRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
if(level->num_my_boxes>0){
if(ghostsToRecv == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToRecv\n");exit(0);}
if(recvRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/recvRanks \n");exit(0);}
}
numGhosts = 0;
numGhostsRemote = 0;
for(recvBox=0;recvBox<level->num_my_boxes;recvBox++){
int di,dj,dk;
for(dk=-1;dk<=1;dk++){
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
box_type* lbox = (box_type *)&(level->my_boxes[recvBox]);
int myBoxID = lbox->global_box_id;
int myBox_i = lbox->low.i / level->box_dim;
int myBox_j = lbox->low.j / level->box_dim;
int myBox_k = lbox->low.k / level->box_dim;
int neighborBoxID = -1;
//if(1){
if(level->boundary_condition.type == BC_PERIODIC){
int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}else{
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
(neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
(neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){ // i.e. the neighbor is a valid box
neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
}
}
if(neighborBoxID>=0){
if( (level->rank_of_box[neighborBoxID] != -1) && (level->rank_of_box[neighborBoxID] != level->my_rank) ){
ghostsToRecv[numGhosts].sendRank = level->rank_of_box[neighborBoxID];
ghostsToRecv[numGhosts].sendBoxID = neighborBoxID;
ghostsToRecv[numGhosts].sendBox = -1;
ghostsToRecv[numGhosts].sendDir = 26-dir;
ghostsToRecv[numGhosts].recvRank = level->my_rank;
ghostsToRecv[numGhosts].recvBoxID = myBoxID;
ghostsToRecv[numGhosts].recvBox = recvBox;
numGhosts++;
recvRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
}}
}}}}
}
// sort boxes by sendRank then by sendBoxID... ensures the recvs and receive buffers are always sorted by sendBoxID...
qsort(ghostsToRecv,numGhosts ,sizeof(GZ_type),qsortGZ );
// sort the lists of neighboring ranks and remove duplicates...
qsort(recvRanks ,numGhostsRemote,sizeof( int),qsortInt);
int numRecvRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(recvRanks[ghost] != _rank){_rank=recvRanks[ghost];recvRanks[numRecvRanks++]=recvRanks[ghost];}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// in a two-stage process, traverse the list of ghosts and allocate the unpack lists as well as the MPI buffers, and then populate the unpack list
level->exchange_ghosts[justFaces].num_recvs = numRecvRanks;
level->exchange_ghosts[justFaces].recv_ranks = (int*)malloc(numRecvRanks*sizeof(int));
level->exchange_ghosts[justFaces].recv_sizes = (int*)malloc(numRecvRanks*sizeof(int));
level->exchange_ghosts[justFaces].recv_buffers = (double**)malloc(numRecvRanks*sizeof(double*));
#ifdef USE_UPCXX
level->exchange_ghosts[justFaces].sblock2 = (int*)malloc((numRecvRanks+2)*sizeof(int));
level->exchange_ghosts[justFaces].rflag = hclib::upcxx::allocate<int>(MYTHREAD, MAX_VG);
int *tmpp = (int *)level->exchange_ghosts[justFaces].rflag;
memset(tmpp, 0, MAX_VG * sizeof(int));
memset((void *)level->exchange_ghosts[justFaces].sblock2, 0, sizeof(int)*(numRecvRanks+2));
level->exchange_ghosts[justFaces].global_recv_buffers = (hclib::upcxx::global_ptr<double> *) malloc(numRecvRanks*sizeof(hclib::upcxx::global_ptr<double>));
#endif
if(numRecvRanks>0){
if(level->exchange_ghosts[justFaces].recv_ranks ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_ranks\n",justFaces);exit(0);}
if(level->exchange_ghosts[justFaces].recv_sizes ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_sizes\n",justFaces);exit(0);}
if(level->exchange_ghosts[justFaces].recv_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers\n",justFaces);exit(0);}
#ifdef USE_UPCXX
if(level->exchange_ghosts[justFaces].global_recv_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].global_recv_buffers\n",justFaces);exit(0);}
#endif
}
level->exchange_ghosts[justFaces].blocks[2] = NULL;
level->exchange_ghosts[justFaces].num_blocks[2] = 0;
level->exchange_ghosts[justFaces].allocated_blocks[2] = 0;
for(stage=0;stage<=1;stage++){
// stage=0... traverse the list and calculate the buffer sizes
// stage=1... allocate MPI recv buffers, traverse the list, and populate the unpack/local lists...
int neighbor;
for(neighbor=0;neighbor<numRecvRanks;neighbor++){
if(stage==1){
#ifdef USE_UPCXX
level->exchange_ghosts[justFaces].global_recv_buffers[neighbor] = hclib::upcxx::allocate<double>(MYTHREAD, level->exchange_ghosts[justFaces].recv_sizes[neighbor]);
level->exchange_ghosts[justFaces].recv_buffers[neighbor] = (double *)level->exchange_ghosts[justFaces].global_recv_buffers[neighbor];
#else
level->exchange_ghosts[justFaces].recv_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[justFaces].recv_sizes[neighbor]*sizeof(double));
if(level->exchange_ghosts[justFaces].recv_sizes[neighbor]>0)
if(level->exchange_ghosts[justFaces].recv_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers[neighbor]\n",justFaces);exit(0);}
#endif
if (level->exchange_ghosts[justFaces].recv_sizes[neighbor] > 0)
memset(level->exchange_ghosts[justFaces].recv_buffers[neighbor], 0,level->exchange_ghosts[justFaces].recv_sizes[neighbor]*sizeof(double));
}
level->exchange_ghosts[justFaces].recv_ranks[neighbor]=recvRanks[neighbor];
level->exchange_ghosts[justFaces].recv_sizes[neighbor]=0;
}
for(ghost=0;ghost<numGhosts;ghost++){
int dim_i=-1, dim_j=-1, dim_k=-1;
//int send_i=-1,send_j=-1,send_k=-1;
int recv_i=-1,recv_j=-1,recv_k=-1;
// decode ghostsToRecv[ghost].sendDir (direction sent) into di/dj/dk
int di = ((ghostsToRecv[ghost].sendDir % 3) )-1;
int dj = ((ghostsToRecv[ghost].sendDir % 9)/3)-1;
int dk = ((ghostsToRecv[ghost].sendDir / 9) )-1;
switch(di){ // direction relative to sender
case -1:dim_i=level->box_ghosts;recv_i= level->box_dim; break;
case 0:dim_i=level->box_dim; recv_i=0; break;
case 1:dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
}
switch(dj){ // direction relative to sender
case -1:dim_j=level->box_ghosts;recv_j= level->box_dim; break;
case 0:dim_j=level->box_dim; recv_j=0; break;
case 1:dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
}
switch(dk){ // direction relative to sender
case -1:dim_k=level->box_ghosts;recv_k= level->box_dim; break;
case 0:dim_k=level->box_dim; recv_k=0; break;
case 1:dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
}
// determine if this ghost requires a pack or local exchange
#ifdef USE_UPCXX
if (hclib::upcxx::is_memory_shared_with(ghostsToRecv[ghost].sendRank)) continue;
#endif
neighbor=0;while(level->exchange_ghosts[justFaces].recv_ranks[neighbor] != ghostsToRecv[ghost].sendRank)neighbor++;
if(stage==1) {
#ifdef USE_UPCXX
hclib::upcxx::global_ptr<box_type> send_boxgp = level->addr_of_box[ghostsToRecv[ghost].sendBoxID];
hclib::upcxx::global_ptr<box_type> recv_boxgp = level->addr_of_box[ghostsToRecv[ghost].recvBoxID];
#endif
append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[2]),&(level->exchange_ghosts[justFaces].allocated_blocks[2]),&(level->exchange_ghosts[justFaces].num_blocks[2]),
/*dim.i = */ dim_i,
/*dim.j = */ dim_j,
/*dim.k = */ dim_k,
#ifdef USE_UPCXX
/*read.boxgp = */ send_boxgp,
#endif
/*read.box = */ (-1)*ghostsToRecv[ghost].sendRank-1, // shan -1,
/*read.ptr = */ level->exchange_ghosts[justFaces].recv_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
/*read.i = */ level->exchange_ghosts[justFaces].recv_sizes[neighbor], // current offset in the MPI recv buffer
/*read.j = */ 0,
/*read.k = */ 0,
/*read.jStride = */ dim_i, // contiguous block
/*read.kStride = */ dim_i*dim_j, // contiguous block
/*read.scale = */ 1,
#ifdef USE_UPCXX
/*write.box = */ recv_boxgp,
#endif
/*write.box = */ ghostsToRecv[ghost].recvBoxID,
/*write.ptr = */ NULL,
/*write.i = */ recv_i,
/*write.j = */ recv_j,
/*write.k = */ recv_k,
/*write.jStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].get().jStride,
/*write.kStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].get().kStride,
/*write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
); }
if(neighbor>=0)level->exchange_ghosts[justFaces].recv_sizes[neighbor]+=dim_i*dim_j*dim_k;
} // ghost for-loop
} // stage for-loop
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// free temporary storage...
free(ghostsToRecv);
free(recvRanks);
#ifdef USE_UPCXX
hclib::upcxx::barrier();
// compute the global_match_buffers for upcxx::async_copy()
int neighbor;
hclib::upcxx::global_ptr<double> p, p1, p2;
for (neighbor = 0; neighbor < level->exchange_ghosts[justFaces].num_recvs; neighbor++) {
int nid = level->exchange_ghosts[justFaces].recv_ranks[neighbor];
upc_buf_info[MYTHREAD * THREADS + nid] = level->exchange_ghosts[justFaces].global_recv_buffers[neighbor];
upc_int_info[MYTHREAD * THREADS + nid] = neighbor;
}
upc_rflag_ptr[level->my_rank] = level->exchange_ghosts[justFaces].rflag;
hclib::upcxx::barrier();
for (neighbor = 0; neighbor < level->exchange_ghosts[justFaces].num_sends; neighbor++) {
int nid = level->exchange_ghosts[justFaces].send_ranks[neighbor];
level->exchange_ghosts[justFaces].global_match_buffers[neighbor] = upc_buf_info[THREADS*nid + MYTHREAD];
level->exchange_ghosts[justFaces].send_match_pos[neighbor] = upc_int_info[THREADS*nid + MYTHREAD];
level->exchange_ghosts[justFaces].match_rflag[neighbor] = upc_rflag_ptr[nid];
}
hclib::upcxx::barrier();
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// malloc MPI requests/status arrays
#endif
#ifdef USE_UPCXX
// setup start and end position for each receiver
if (level->exchange_ghosts[justFaces].num_blocks[2] > 0) {
int curpos = 0;
int curproc = level->exchange_ghosts[justFaces].blocks[2][0].read.box * (-1) -1;
while (level->exchange_ghosts[justFaces].recv_ranks[curpos] != curproc && curpos < level->exchange_ghosts[justFaces].num_recvs) curpos++;
level->exchange_ghosts[justFaces].sblock2[curpos] = 0; // already initialized to 0, just be safe
for(int buffer=1;buffer<level->exchange_ghosts[justFaces].num_blocks[2];buffer++){
if (level->exchange_ghosts[justFaces].blocks[2][buffer].read.box != -1-curproc) {
level->exchange_ghosts[justFaces].sblock2[++curpos] = buffer;
curproc = level->exchange_ghosts[justFaces].blocks[2][buffer].read.box * (-1) -1;
while (curproc != level->exchange_ghosts[justFaces].recv_ranks[curpos]) {
level->exchange_ghosts[justFaces].sblock2[++curpos] = buffer;
}
}
}
for (int i = curpos+1; i <= level->exchange_ghosts[justFaces].num_recvs; i++) {
level->exchange_ghosts[justFaces].sblock2[i] = level->exchange_ghosts[justFaces].num_blocks[2];
}
}
#endif
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void create_level(level_type *level, int boxes_in_i, int box_dim, int box_ghosts, int box_vectors, int domain_boundary_condition, int my_rank, int num_ranks){
int box;
int TotalBoxes = boxes_in_i*boxes_in_i*boxes_in_i;
if(my_rank==0){
if(domain_boundary_condition==BC_DIRICHLET)fprintf(stdout,"\nattempting to create a %d^3 level (with Dirichlet BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n",box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
if(domain_boundary_condition==BC_PERIODIC )fprintf(stdout,"\nattempting to create a %d^3 level (with Periodic BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n", box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
}
// int omp_threads = 1;
// int omp_nested = 0;
int omp_threads = hclib::num_workers();
int omp_nested = 1;
// #ifdef _OPENMP
// #pragma omp parallel
// {
// #pragma omp master
// {
// omp_threads = omp_get_num_threads();
// omp_nested = omp_get_nested();
// }
// }
// #endif
if(box_ghosts < stencil_get_radius() ){
if(my_rank==0)fprintf(stderr,"ghosts(%d) must be >= stencil_get_radius(%d)\n",box_ghosts,stencil_get_radius());
exit(0);
}
level->memory_allocated = 0;
level->box_dim = box_dim;
level->box_ghosts = box_ghosts;
level->box_vectors = box_vectors;
level->boxes_in.i = boxes_in_i;
level->boxes_in.j = boxes_in_i;
level->boxes_in.k = boxes_in_i;
level->dim.i = box_dim*level->boxes_in.i;
level->dim.j = box_dim*level->boxes_in.j;
level->dim.k = box_dim*level->boxes_in.k;
level->active = 1;
level->my_rank = my_rank;
level->num_ranks = num_ranks;
level->boundary_condition.type = domain_boundary_condition;
level->alpha_is_zero = -1;
level->num_threads = omp_threads;
// intra-box threading...
level->threads_per_box = omp_threads;
level->concurrent_boxes = 1;
// inter-box threading...
//level->threads_per_box = 1;
//level->concurrent_boxes = omp_threads;
level->my_blocks = NULL;
level->num_my_blocks = 0;
level->allocated_blocks = 0;
level->tag = log2(level->dim.i);
// allocate 3D array of integers to hold the MPI rank of the corresponding box and initialize to -1 (unassigned)
level->rank_of_box = (int*)malloc(level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int));
if(level->rank_of_box==NULL){fprintf(stderr,"malloc of level->rank_of_box failed\n");exit(0);}
level->memory_allocated += (level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int));
for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){level->rank_of_box[box]=-1;} // -1 denotes that there is no actual box assigned to this region
// parallelize the grid (i.e. assign a process rank to each box)...
#ifdef DECOMPOSE_LEX
decompose_level_lex(level->rank_of_box,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks);
#elif DECOMPOSE_BISECTION_SPECIAL
decompose_level_bisection_special(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,num_ranks);
#else
decompose_level_bisection(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks,0,level->boxes_in.i*level->boxes_in.j*level->boxes_in.k);
#endif
//print_decomposition(level);// for debug purposes only
// build my list of boxes...
level->num_my_boxes=0;
for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){if(level->rank_of_box[box]==level->my_rank)level->num_my_boxes++;}
#ifdef USE_UPCXX
level->my_boxes = hclib::upcxx::allocate<box_type>(MYTHREAD, level->num_my_boxes);
level->my_local_boxes = (box_type **)malloc(level->num_my_boxes * sizeof(box_type *));
/* NOTE: add a local array to record the global address of every box, later, this array can be shrinked to neighbor boxes only */
level->addr_of_box = (hclib::upcxx::global_ptr<box_type> *)malloc(level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(hclib::upcxx::global_ptr<box_type>));
#else
level->my_boxes = (box_type*)malloc(level->num_my_boxes*sizeof(box_type));
#endif
if((level->num_my_boxes>0)&&(level->my_boxes==NULL)){fprintf(stderr,"malloc failed - create_level/level->my_boxes\n");exit(0);}
box=0;
int i,j,k;
for(k=0;k<level->boxes_in.k;k++){
for(j=0;j<level->boxes_in.j;j++){
for(i=0;i<level->boxes_in.i;i++){
int jStride = level->boxes_in.i;
int kStride = level->boxes_in.i*level->boxes_in.j;
int b=i + j*jStride + k*kStride;
if(level->rank_of_box[b]==level->my_rank){
level->memory_allocated += create_box((box_type *)&level->my_boxes[box],level->box_vectors,level->box_dim,level->box_ghosts);
#ifdef USE_UPCXX
upc_box_info[b] = &level->my_boxes[box];
box_type* lbox = (box_type *)&(level->my_boxes[box]);
level->my_local_boxes[box] = lbox;
lbox->low.i = i*level->box_dim;
lbox->low.j = j*level->box_dim;
lbox->low.k = k*level->box_dim;
lbox->global_box_id = b;
#else
level->my_boxes[box].low.i = i*level->box_dim;
level->my_boxes[box].low.j = j*level->box_dim;
level->my_boxes[box].low.k = k*level->box_dim;
level->my_boxes[box].global_box_id = b;
#endif
box++;
}}}}
#ifdef USE_UPCXX
// NOTE: this may removed later
hclib::upcxx::barrier();
for (i = 0; i < level->boxes_in.i*level->boxes_in.j*level->boxes_in.k; i++)
level->addr_of_box[i] = upc_box_info[i];
hclib::upcxx::barrier();
#endif
// Build and auxilarlly data structure that flattens boxes into blocks...
for(box=0;box<level->num_my_boxes;box++){
#ifdef USE_UPCXX
hclib::upcxx::global_ptr<box_type> boxgp = level->addr_of_box[level->my_boxes[box].get().global_box_id];
#endif
append_block_to_list(&(level->my_blocks),&(level->allocated_blocks),&(level->num_my_blocks),
/* dim.i = */ level->my_boxes[box].get().dim,
/* dim.j = */ level->my_boxes[box].get().dim,
/* dim.k = */ level->my_boxes[box].get().dim,
#ifdef USE_UPCXX
/* read.boxgp = */ boxgp,
#endif
/* read.box = */ box,
/* read.ptr = */ NULL,
/* read.i = */ 0,
/* read.j = */ 0,
/* read.k = */ 0,
/* read.jStride = */ level->my_boxes[box].get().jStride,
/* read.kStride = */ level->my_boxes[box].get().kStride,
/* read.scale = */ 1,
#ifdef USE_UPCXX
/* write.boxgp = */ boxgp,
#endif
/* write.box = */ box,
/* write.ptr = */ NULL,
/* write.i = */ 0,
/* write.j = */ 0,
/* write.k = */ 0,
/* write.jStride = */ level->my_boxes[box].get().jStride,
/* write.kStride = */ level->my_boxes[box].get().kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);
}
// Tune the OpenMP style of parallelism...
if(omp_nested){
#ifndef OMP_STENCILS_PER_THREAD
#define OMP_STENCILS_PER_THREAD 64
#endif
level->concurrent_boxes = level->num_my_boxes;
if(level->concurrent_boxes > omp_threads)level->concurrent_boxes = omp_threads;
if(level->concurrent_boxes < 1)level->concurrent_boxes = 1;
level->threads_per_box = omp_threads / level->concurrent_boxes;
if(level->threads_per_box > level->box_dim*level->box_dim)
level->threads_per_box = level->box_dim*level->box_dim; // JK collapse
if(level->threads_per_box > level->box_dim*level->box_dim*level->box_dim/OMP_STENCILS_PER_THREAD )
level->threads_per_box = level->box_dim*level->box_dim*level->box_dim/OMP_STENCILS_PER_THREAD;
if(level->threads_per_box<1)level->threads_per_box = 1;
}else{
if(level->num_my_boxes>8){level->concurrent_boxes=omp_threads;level->threads_per_box=1;}
}
if(my_rank==0){
if(omp_nested)fprintf(stdout," OMP_NESTED=TRUE OMP_NUM_THREADS=%d ... %d teams of %d threads\n",omp_threads,level->concurrent_boxes,level->threads_per_box);
else fprintf(stdout," OMP_NESTED=FALSE OMP_NUM_THREADS=%d ... %d teams of %d threads\n",omp_threads,level->concurrent_boxes,level->threads_per_box);
}
// build an assists data structure which specifies which cells are within the domain (used with STENCIL_FUSE_BC)
initialize_valid_region(level);
// build an assist structure for Gauss Seidel Red Black that would facilitate unrolling and SIMDization...
if(level->num_my_boxes){
int i,j;
int kStride = level->my_boxes[0].get().kStride;
int jStride = level->my_boxes[0].get().jStride;
//posix_memalign((void**)&(level->RedBlack_FP[0] ),64,kStride*sizeof(double )); // even planes
//posix_memalign((void**)&(level->RedBlack_FP[1] ),64,kStride*sizeof(double ));
// FIX... align RedBlack_FP the same as elements within a plane (i.e. BOX_SIMD_ALIGNMENT)
level->RedBlack_FP[0] = (double*)malloc(kStride*sizeof(double));
level->RedBlack_FP[1] = (double*)malloc(kStride*sizeof(double));
memset(level->RedBlack_FP[0],0,kStride*sizeof(double));
memset(level->RedBlack_FP[1],0,kStride*sizeof(double));
level->memory_allocated += kStride*sizeof(double);
level->memory_allocated += kStride*sizeof(double);
for(j=0-level->box_ghosts;j<level->box_dim+level->box_ghosts;j++){
for(i=0-level->box_ghosts;i<level->box_dim+level->box_ghosts;i++){
int ij = (i+level->box_ghosts) + (j+level->box_ghosts)*jStride;
// if((i^j)&0x1)level->RedBlack_64bMask[ij]= ~0;else level->RedBlack_64bMask[ij]= 0; // useful for blend instructions
if((i^j^1)&0x1)level->RedBlack_FP[ 0][ij]=1.0;else level->RedBlack_FP[ 0][ij]=0.0;
if((i^j^1)&0x1)level->RedBlack_FP[ 1][ij]=0.0;else level->RedBlack_FP[ 1][ij]=1.0;
}}
}
// create mini programs that affect ghost zone exchanges
for(i=0;i<2;i++){
level->exchange_ghosts[i].num_recvs =0;
level->exchange_ghosts[i].num_sends =0;
level->exchange_ghosts[i].num_blocks[0]=0;
level->exchange_ghosts[i].num_blocks[1]=0;
level->exchange_ghosts[i].num_blocks[2]=0;
}
build_exchange_ghosts(level,0); // faces, edges, corners
build_exchange_ghosts(level,1); // justFaces
build_boundary_conditions(level,0); // faces, edges, corners
build_boundary_conditions(level,1); // just faces
// duplicate MPI_COMM_WORLD to be the communicator for each level
#ifdef USE_MPI
if(my_rank==0){fprintf(stdout," Duplicating MPI_COMM_WORLD...");fflush(stdout);}
double time_start = hclib::MPI_Wtime();
hclib::MPI_Comm_dup(MPI_COMM_WORLD,&level->MPI_COMM_ALLREDUCE);
// create upcxx barrier
level->subteam = &hclib::upcxx::team_all;
double time_end = hclib::MPI_Wtime();
double time_in_comm_dup = 0;
double time_in_comm_dup_send = time_end-time_start;
hclib::MPI_Allreduce(&time_in_comm_dup_send,&time_in_comm_dup,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
if(my_rank==0){fprintf(stdout,"done (%0.6f seconds)\n",time_in_comm_dup);fflush(stdout);}
#endif
// report on potential load imbalance
int BoxesPerProcess = level->num_my_boxes;
#ifdef USE_MPI
int BoxesPerProcessSend = level->num_my_boxes;
hclib::MPI_Allreduce(&BoxesPerProcessSend,&BoxesPerProcess,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
#endif
if(my_rank==0){fprintf(stdout," Calculating boxes per process... target=%0.3f, max=%d\n",(double)TotalBoxes/(double)num_ranks,BoxesPerProcess);}
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void reset_level_timers(level_type *level){
// cycle counters information...
level->cycles.smooth = 0;
level->cycles.apply_op = 0;
level->cycles.residual = 0;
level->cycles.blas1 = 0;
level->cycles.blas3 = 0;
level->cycles.boundary_conditions = 0;
level->cycles.restriction_total = 0;
level->cycles.restriction_pack = 0;
level->cycles.restriction_local = 0;
level->cycles.restriction_shm = 0;
level->cycles.restriction_unpack = 0;
level->cycles.restriction_recv = 0;
level->cycles.restriction_send = 0;
level->cycles.restriction_wait = 0;
level->cycles.interpolation_total = 0;
level->cycles.interpolation_pack = 0;
level->cycles.interpolation_local = 0;
level->cycles.interpolation_shm = 0;
level->cycles.interpolation_unpack = 0;
level->cycles.interpolation_recv = 0;
level->cycles.interpolation_send = 0;
level->cycles.interpolation_wait = 0;
level->cycles.ghostZone_total = 0;
level->cycles.ghostZone_pack = 0;
level->cycles.ghostZone_local = 0;
level->cycles.ghostZone_shm = 0;
level->cycles.ghostZone_unpack = 0;
level->cycles.ghostZone_recv = 0;
level->cycles.ghostZone_send = 0;
level->cycles.ghostZone_wait = 0;
level->cycles.collectives = 0;
level->cycles.Total = 0;
// solver events information...
level->Krylov_iterations = 0;
level->CAKrylov_formations_of_G = 0;
level->vcycles_from_this_level = 0;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void max_level_timers(level_type *level){
uint64_t temp;
#ifdef USE_MPI
temp=level->cycles.smooth; hclib::MPI_Allreduce(&temp,&level->cycles.smooth ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.apply_op; hclib::MPI_Allreduce(&temp,&level->cycles.apply_op ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.residual; hclib::MPI_Allreduce(&temp,&level->cycles.residual ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.blas1; hclib::MPI_Allreduce(&temp,&level->cycles.blas1 ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.blas3; hclib::MPI_Allreduce(&temp,&level->cycles.blas3 ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.boundary_conditions; hclib::MPI_Allreduce(&temp,&level->cycles.boundary_conditions ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_total; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_total ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_pack; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_pack ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_local; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_local ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_shm ; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_shm ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_unpack; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_unpack ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_recv; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_recv ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_send; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_send ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.restriction_wait; hclib::MPI_Allreduce(&temp,&level->cycles.restriction_wait ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_total; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_total ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_pack; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_pack ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_local; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_local ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_shm; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_shm ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_unpack;hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_unpack,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_recv; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_recv ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_send; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_send ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.interpolation_wait; hclib::MPI_Allreduce(&temp,&level->cycles.interpolation_wait ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_total; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_total ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_pack; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_pack ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_local; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_local ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_shm; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_shm ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_unpack; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_unpack ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_recv; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_recv ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_send; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_send ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.ghostZone_wait; hclib::MPI_Allreduce(&temp,&level->cycles.ghostZone_wait ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.collectives; hclib::MPI_Allreduce(&temp,&level->cycles.collectives ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
temp=level->cycles.Total; hclib::MPI_Allreduce(&temp,&level->cycles.Total ,1,MPI_UINT64_T,MPI_MAX,MPI_COMM_WORLD);
#endif
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void destroy_level(level_type *level){
int box;
for(box=0;box<level->num_my_boxes;box++) destroy_box((box_type *)&level->my_boxes[box]);
free(level->rank_of_box);
}
|
byzantine.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "ompdist/queues.h"
#include "ompdist/utils.h"
#include "config.h"
#define L ((5*N)/8 + 1)
#define H ((3*N)/4 + 1)
#define G ((7*N)/8)
typedef struct {
int b;
int vote;
int good;
int d;
int decided;
} processor;
typedef struct {
int from;
int vote;
} vote;
/**
* new_processors - Allocates and initializes N new processors.
*
* @N: the number of processors
*
* Returns a pointer to a processor array.
*/
processor* new_processors(int N) {
DEBUG("allocating %d processors\n", N);
processor* processors = malloc(N * sizeof(processor));
DEBUG("initializing %d processors\n", N);
for (int i = 0; i < N; i++) {
processor* p = processors+i;
p->good = rand()%100 > 10; // 10% of the processors are bad
p->b = rand()%2;
p->vote = p->b;
p->d = 0;
p->decided = 0;
}
return processors;
}
/**
* any_good_undecided_processor - Checks if there are any good processors that
* are still undecided on the vote.
*
* @processors: a pointer to a processor array
* @N: the number of processors
*
* Returns 0 if there aren't any such good, undecided processors. Returns 1
* otherwise.
*/
int any_good_undecided_processor(processor* processors, int N) {
int ret = 0;
DEBUG("checking if there are any undecided processors\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < N; i++) {
processor* p = processors+i;
if (p->decided == 0)
ret = 1;
}
DEBUG("ret = %d\n", ret);
return ret;
}
/**
* broadcast_vote - Braodcasts each processor's votes to all other nodes.
*
* @processors: a pointer to a processor array
* @N: the number of processors
* @vote_ql: a queuelist of votes
*/
void broadcast_vote(processor* processors, int N, queuelist* vote_ql) {
DEBUG("broadcasting votes\n");
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < N; i++) {
processor* p = processors+i;
for (int j = 0; j < N; j++) {
if (i == j)
continue;
vote v = {i, p->vote};
enqueue(vote_ql, j, &v);
}
}
}
/**
* receive_votes - Receives and computes teh votes for each node.
*
* @processors: a pointer to a processor array
* @N: the number of processors
* @vote_ql: a queuelist of votes
*/
void receive_votes(processor* processors, int N, queuelist* vote_ql) {
#pragma omp parallel for schedule(SCHEDULING_METHOD)
for (int i = 0; i < N; i++) {
processor* p = processors+i;
int yes = 0;
int no = 0;
while (!is_ql_queue_empty(vote_ql, i)) {
vote* v = dequeue(vote_ql, i);
if (v->vote)
yes++;
else
no++;
}
int maj = 1;
int tally = yes;
if (no > yes) {
maj = 0;
tally = no;
}
int threshold;
if (rand() % 2 == 0)
threshold = L;
else
threshold = H;
if (tally > threshold)
p->vote = maj;
else
p->vote = 0;
if (tally >= G) {
p->decided = 1;
p->d = maj;
}
}
}
/**
* verify_and_print_solution - Verifies that the computed solution is correct
* and prints the solution.
*
* @processors: a pointer to an array of processors
* @N: the number of processors
*
* Returns 1 if there's no consensus. Returns 0 if there's a consensus.
*/
int verify_and_print_solution(processor* processors, int N) {
int yes = 0;
int no = 0;
int good = 0;
for (int i = 0; i < N; i++) {
processor* p = processors+i;
if (p->good)
good++;
if (p->decided && p->good) {
if (p->d == 0)
no++;
else
yes++;
}
}
if (yes+no != good) {
WARN("incorrect: some processors haven't decided yet\n");
return 1;
}
if (yes != 0 && no != 0) {
WARN("incorrect: there's no consensus\n");
return 1;
}
INFO("correct: there's consensus: of the %d good processors, yes=%d, no=%d\n", good, yes, no);
return 0;
}
/**
* Based on section 12.6 Byzantine Agreement of Rajeev Motwani's and Prabakhar
* Raghavan's book Randomized Algorithms.
*/
int main(int argc, char* argv[]) {
int N = 16;
if (argc > 1) {
sscanf(argv[1], "%d", &N);
}
srand(0);
processor* processors = new_processors(N);
queuelist* vote_ql = new_queuelist(N, sizeof(vote));
while (1) {
if (!any_good_undecided_processor(processors, N))
break;
broadcast_vote(processors, N, vote_ql);
receive_votes(processors, N, vote_ql);
}
return verify_and_print_solution(processors, N);
}
|
multisort-omp-leaf.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6f\n",(_m), stamp);
// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
#define BLOCK_SIZE 1024L
#define T int
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
if (length < MIN_MERGE_SIZE*2L) {
// Base case
#pragma omp task
basicmerge(n, left, right, result, start, length);
} else {
// Recursive decomposition
merge(n, left, right, result, start, length/2);
merge(n, left, right, result, start + length/2, length/2);
}
}
void multisort(long n, T data[n], T tmp[n]) {
if (n >= MIN_SORT_SIZE*4L) {
// Recursive decomposition
multisort(n/4L, &data[0], &tmp[0]);
multisort(n/4L, &data[n/4L], &tmp[n/4L]);
multisort(n/4L, &data[n/2L], &tmp[n/2L]);
multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);
#pragma omp taskwait
merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);
#pragma omp taskwait
merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
} else {
// Base case
#pragma omp task
basicsort(n, data);
}
}
static void initialize(long length, T data[length]) {
long i;
for (i = 0; i < length; i++) {
if (i==0) {
data[i] = rand();
} else {
data[i] = ((data[i-1]+1) * i * 104723L) % N;
}
}
}
static void clear(long length, T data[length]) {
long i;
for (i = 0; i < length; i++) {
data[i] = 0;
}
}
void check_sorted(long n, T data[n])
{
int unsorted=0;
for (int i=1; i<n; i++)
if (data[i-1] > data[i]) unsorted++;
if (unsorted > 0)
printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
else {
// printf ("data IS ordered; ");
}
}
int main(int argc, char **argv) {
if (argc != 4) {
fprintf(stderr, "Usage: %s <vector size in K> <sort size in K> <merge size in K>\n", argv[0]);
return 1;
}
N = atol(argv[1]) * BLOCK_SIZE;
MIN_SORT_SIZE = atol(argv[2]) * BLOCK_SIZE;
MIN_MERGE_SIZE = atol(argv[3]) * BLOCK_SIZE;
T *data = malloc(N*sizeof(T));
T *tmp = malloc(N*sizeof(T));
double stamp;
START_COUNT_TIME;
initialize(N, data);
clear(N, tmp);
STOP_COUNT_TIME("Initialization time in seconds");
START_COUNT_TIME;
#pragma omp parallel
#pragma omp single
multisort(N, data, tmp);
STOP_COUNT_TIME("Multisort execution time");
START_COUNT_TIME;
check_sorted (N, data);
STOP_COUNT_TIME("Check sorted data execution time");
fprintf(stdout, "Multisort program finished\n");
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(8*t3+Nx-5,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),32*t4+30);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t3+Nx,64),floord(Nt+Nx-4,64)),floord(8*t1+Nx+13,64)),floord(16*t2+Nx+12,64)),floord(16*t1-16*t2+Nz+Nx+11,64));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),64*t4+62),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R RN A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the p point as open if the subpath is not closed.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates >> 2))
break;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
PixelInfo
composite,
pixel;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
Check if there is enough storage for drawing pimitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
quantum=sizeof(**mvg_info->primitive_info);
if (((extent*quantum) < (double) SSIZE_MAX) &&
((extent*quantum) < (double) GetMaxMemoryRequest()))
{
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
char **magick_restrict sentinal)
{
double value = InterpretLocaleValue(string,sentinal);
if (value < (double) -(SSIZE_MAX-512))
return((double) -(SSIZE_MAX-512));
if (value > (double) (SSIZE_MAX-512))
return((double) (SSIZE_MAX-512));
return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=GetDrawValue(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#00000000",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
DisableMSCWarning(4127)
if (0)
{
status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
if (status == MagickFalse)
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(status);
}
}
RestoreMSCWarning
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status&=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
status&=SetImageInfo(clone_info,0,exception);
if (LocaleNCompare(clone_info->magick,"http",4) == 0)
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine,exception);
else
status&=CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(image,draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info..
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permuation of the (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(-1);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
return((PrimitiveInfo *) NULL); \
} \
}
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
return((PrimitiveInfo *) NULL);
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% John Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
ChannelType
channel;
MagickBooleanType
modulus;
size_t
width,
height;
ssize_t
center;
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
const ssize_t x_offset,const ssize_t y_offset,double *fourier)
{
double
*roll;
register ssize_t
i,
x;
ssize_t
u,
v,
y;
/*
Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
*/
roll=(double *) AcquireQuantumMemory((size_t) height,width*sizeof(*roll));
if (roll == (double *) NULL)
return(MagickFalse);
i=0L;
for (y=0L; y < (ssize_t) height; y++)
{
if (y_offset < 0L)
v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
else
v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
y+y_offset;
for (x=0L; x < (ssize_t) width; x++)
{
if (x_offset < 0L)
u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
else
u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
x+x_offset;
roll[v*width+u]=fourier[i++];
}
}
(void) CopyMagickMemory(fourier,roll,height*width*sizeof(*roll));
roll=(double *) RelinquishMagickMemory(roll);
return(MagickTrue);
}
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
const size_t height,double *source,double *destination)
{
MagickBooleanType
status;
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
center=(ssize_t) floor((double) width/2L)+1L;
status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,source);
if (status == MagickFalse)
return(MagickFalse);
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L-1L); x++)
destination[width*y+x+width/2L]=source[center*y+x];
for (y=1; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L-1L); x++)
destination[width*(height-y)+width/2L-x-1L]=source[center*y+x+1L];
for (x=0L; x < (ssize_t) (width/2L); x++)
destination[-x+width/2L-1L]=destination[x+width/2L+1L];
return(MagickTrue);
}
static void CorrectPhaseLHS(const size_t width,const size_t height,
double *fourier)
{
register ssize_t
x;
ssize_t
y;
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
fourier[y*width+x]*=(-1.0);
}
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude_source,
*phase_source;
Image
*magnitude_image,
*phase_image;
MagickBooleanType
status;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*q;
ssize_t
i,
y;
magnitude_image=GetFirstImageInList(image);
phase_image=GetNextImageInList(image);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",image->filename);
return(MagickFalse);
}
/*
Create "Fourier Transform" image from constituent arrays.
*/
magnitude_source=(double *) AcquireQuantumMemory((size_t)
fourier_info->height,fourier_info->width*sizeof(*magnitude_source));
if (magnitude_source == (double *) NULL)
return(MagickFalse);
(void) ResetMagickMemory(magnitude_source,0,fourier_info->height*
fourier_info->width*sizeof(*magnitude_source));
phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*phase_source));
if (phase_source == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
return(MagickFalse);
}
status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,
magnitude,magnitude_source);
if (status != MagickFalse)
status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,phase,
phase_source);
CorrectPhaseLHS(fourier_info->height,fourier_info->height,phase_source);
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_source[i]/=(2.0*MagickPI);
phase_source[i]+=0.5;
i++;
}
}
magnitude_view=AcquireCacheView(magnitude_image);
phase_view=AcquireCacheView(phase_image);
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetRedPixelComponent(q,ClampToQuantum(QuantumRange*
magnitude_source[i]));
break;
}
case GreenChannel:
{
SetGreenPixelComponent(q,ClampToQuantum(QuantumRange*
magnitude_source[i]));
break;
}
case BlueChannel:
{
SetBluePixelComponent(q,ClampToQuantum(QuantumRange*
magnitude_source[i]));
break;
}
case OpacityChannel:
{
SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange*
magnitude_source[i]));
break;
}
case IndexChannel:
{
SetIndexPixelComponent(indexes+x,ClampToQuantum(QuantumRange*
magnitude_source[i]));
break;
}
case GrayChannels:
{
SetGrayPixelComponent(q,ClampToQuantum(QuantumRange*
magnitude_source[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
if (status == MagickFalse)
break;
}
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetRedPixelComponent(q,ClampToQuantum(QuantumRange*
phase_source[i]));
break;
}
case GreenChannel:
{
SetGreenPixelComponent(q,ClampToQuantum(QuantumRange*
phase_source[i]));
break;
}
case BlueChannel:
{
SetBluePixelComponent(q,ClampToQuantum(QuantumRange*
phase_source[i]));
break;
}
case OpacityChannel:
{
SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange*
phase_source[i]));
break;
}
case IndexChannel:
{
SetIndexPixelComponent(indexes+x,ClampToQuantum(QuantumRange*
phase_source[i]));
break;
}
case GrayChannels:
{
SetGrayPixelComponent(q,ClampToQuantum(QuantumRange*phase_source[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(phase_view,exception);
if (status == MagickFalse)
break;
}
phase_view=DestroyCacheView(phase_view);
magnitude_view=DestroyCacheView(magnitude_view);
phase_source=(double *) RelinquishMagickMemory(phase_source);
magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
return(status);
}
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
const Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*image_view;
double
n,
*source;
fftw_complex
*fourier;
fftw_plan
fftw_r2c_plan;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Generate the forward Fourier transform.
*/
source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*source));
if (source == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
ResetMagickMemory(source,0,fourier_info->height*fourier_info->width*
sizeof(*source));
i=0L;
image_view=AcquireCacheView(image);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
source[i]=QuantumScale*GetRedPixelComponent(p);
break;
}
case GreenChannel:
{
source[i]=QuantumScale*GetGreenPixelComponent(p);
break;
}
case BlueChannel:
{
source[i]=QuantumScale*GetBluePixelComponent(p);
break;
}
case OpacityChannel:
{
source[i]=QuantumScale*GetOpacityPixelComponent(p);
break;
}
case IndexChannel:
{
source[i]=QuantumScale*GetIndexPixelComponent(indexes+x);
break;
}
case GrayChannels:
{
source[i]=QuantumScale*GetGrayPixelComponent(p);
break;
}
}
i++;
p++;
}
}
image_view=DestroyCacheView(image_view);
fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->center*sizeof(*fourier));
if (fourier == (fftw_complex *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
source=(double *) RelinquishMagickMemory(source);
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->width,
source,fourier,FFTW_ESTIMATE);
fftw_execute(fftw_r2c_plan);
fftw_destroy_plan(fftw_r2c_plan);
source=(double *) RelinquishMagickMemory(source);
/*
Normalize Fourier transform.
*/
n=(double) fourier_info->width*(double) fourier_info->width;
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier[i]/=n;
#else
fourier[i][0]/=n;
fourier[i][1]/=n;
#endif
i++;
}
/*
Generate magnitude and phase (or real and imaginary).
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude[i]=cabs(fourier[i]);
phase[i]=carg(fourier[i]);
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
magnitude[i]=creal(fourier[i]);
phase[i]=cimag(fourier[i]);
i++;
}
fourier=(fftw_complex *) RelinquishMagickMemory(fourier);
return(MagickTrue);
}
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
const ChannelType channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
double
*magnitude,
*phase;
fftw_complex
*fourier;
FourierInfo
fourier_info;
MagickBooleanType
status;
size_t
extent;
fourier_info.width=image->columns;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
extent=image->columns < image->rows ? image->rows : image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
fourier_info.center*sizeof(*magnitude));
if (magnitude == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
fourier_info.center*sizeof(*phase));
if (phase == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(MagickFalse);
}
fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height,
fourier_info.center*sizeof(*fourier));
if (fourier == (fftw_complex *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
phase=(double *) RelinquishMagickMemory(phase);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(MagickFalse);
}
status=ForwardFourierTransform(&fourier_info,image,magnitude,phase,exception);
if (status != MagickFalse)
status=ForwardFourier(&fourier_info,fourier_image,magnitude,phase,
exception);
fourier=(fftw_complex *) RelinquishMagickMemory(fourier);
phase=(double *) RelinquishMagickMemory(phase);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(status);
}
#endif
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
extent,
width;
width=image->columns;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
extent=image->columns < image->rows ? image->rows : image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
magnitude_image=CloneImage(image,width,width,MagickFalse,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
phase_image=CloneImage(image,width,width,MagickFalse,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsGrayImage(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayChannels,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,
RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->matte != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
const size_t height,const double *source,double *destination)
{
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
center=(ssize_t) floor((double) width/2.0)+1L;
for (y=1L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L+1L); x++)
destination[center*(height-y)-x+width/2L]=source[y*width+x];
for (y=0L; y < (ssize_t) height; y++)
destination[center*y]=source[y*width+width/2L];
for (x=0L; x < center; x++)
destination[x]=source[center-x-1L];
return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,fftw_complex *fourier,
ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude,
*phase,
*magnitude_source,
*phase_source;
MagickBooleanType
status;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Inverse fourier - read image and break down into a double array.
*/
magnitude_source=(double *) AcquireQuantumMemory((size_t)
fourier_info->height,fourier_info->width*sizeof(*magnitude_source));
if (magnitude_source == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*phase_source));
if (phase_source == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
return(MagickFalse);
}
i=0L;
magnitude_view=AcquireCacheView(magnitude_image);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
magnitude_source[i]=QuantumScale*GetRedPixelComponent(p);
break;
}
case GreenChannel:
{
magnitude_source[i]=QuantumScale*GetGreenPixelComponent(p);
break;
}
case BlueChannel:
{
magnitude_source[i]=QuantumScale*GetBluePixelComponent(p);
break;
}
case OpacityChannel:
{
magnitude_source[i]=QuantumScale*GetOpacityPixelComponent(p);
break;
}
case IndexChannel:
{
magnitude_source[i]=QuantumScale*GetIndexPixelComponent(indexes+x);
break;
}
case GrayChannels:
{
magnitude_source[i]=QuantumScale*GetGrayPixelComponent(p);
break;
}
}
i++;
p++;
}
}
i=0L;
phase_view=AcquireCacheView(phase_image);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
phase_source[i]=QuantumScale*GetRedPixelComponent(p);
break;
}
case GreenChannel:
{
phase_source[i]=QuantumScale*GetGreenPixelComponent(p);
break;
}
case BlueChannel:
{
phase_source[i]=QuantumScale*GetBluePixelComponent(p);
break;
}
case OpacityChannel:
{
phase_source[i]=QuantumScale*GetOpacityPixelComponent(p);
break;
}
case IndexChannel:
{
phase_source[i]=QuantumScale*GetIndexPixelComponent(indexes+x);
break;
}
case GrayChannels:
{
phase_source[i]=QuantumScale*GetGrayPixelComponent(p);
break;
}
}
i++;
p++;
}
}
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_source[i]-=0.5;
phase_source[i]*=(2.0*MagickPI);
i++;
}
}
magnitude_view=DestroyCacheView(magnitude_view);
phase_view=DestroyCacheView(phase_view);
magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->center*sizeof(*magnitude));
if (magnitude == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
phase_source=(double *) RelinquishMagickMemory(phase_source);
return(MagickFalse);
}
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_source,magnitude);
magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
phase=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*phase));
if (phase == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
phase_source=(double *) RelinquishMagickMemory(phase_source);
return(MagickFalse);
}
CorrectPhaseLHS(fourier_info->width,fourier_info->width,phase_source);
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_source,phase);
phase_source=(double *) RelinquishMagickMemory(phase_source);
/*
Merge two sets.
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier[i]=magnitude[i]*cos(phase[i])+I*magnitude[i]*sin(phase[i]);
#else
fourier[i][0]=magnitude[i]*cos(phase[i]);
fourier[i][1]=magnitude[i]*sin(phase[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier[i]=magnitude[i]+I*phase[i];
#else
fourier[i][0]=magnitude[i];
fourier[i][1]=phase[i];
#endif
i++;
}
phase=(double *) RelinquishMagickMemory(phase);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(status);
}
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
double
*source;
fftw_plan
fftw_c2r_plan;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
i,
x;
ssize_t
y;
source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
fourier_info->width*sizeof(*source));
if (source == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
{
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier,source,FFTW_ESTIMATE);
fftw_execute(fftw_c2r_plan);
fftw_destroy_plan(fftw_c2r_plan);
}
i=0L;
image_view=AcquireCacheView(image);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetRedPixelComponent(q,ClampToQuantum(QuantumRange*source[i]));
break;
}
case GreenChannel:
{
SetGreenPixelComponent(q,ClampToQuantum(QuantumRange*source[i]));
break;
}
case BlueChannel:
{
SetBluePixelComponent(q,ClampToQuantum(QuantumRange*source[i]));
break;
}
case OpacityChannel:
{
SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange*source[i]));
break;
}
case IndexChannel:
{
SetIndexPixelComponent(indexes+x,ClampToQuantum(QuantumRange*
source[i]));
break;
}
case GrayChannels:
{
SetGrayPixelComponent(q,ClampToQuantum(QuantumRange*source[i]));
break;
}
}
i++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source=(double *) RelinquishMagickMemory(source);
return(MagickTrue);
}
static MagickBooleanType InverseFourierTransformChannel(
const Image *magnitude_image,const Image *phase_image,
const ChannelType channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
double
*magnitude,
*phase;
fftw_complex
*fourier;
FourierInfo
fourier_info;
MagickBooleanType
status;
size_t
extent;
fourier_info.width=magnitude_image->columns;
if ((magnitude_image->columns != magnitude_image->rows) ||
((magnitude_image->columns % 2) != 0) ||
((magnitude_image->rows % 2) != 0))
{
extent=magnitude_image->columns < magnitude_image->rows ?
magnitude_image->rows : magnitude_image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
fourier_info.center*sizeof(*magnitude));
if (magnitude == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
fourier_info.center*sizeof(*phase));
if (phase == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(MagickFalse);
}
fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height,
fourier_info.center*sizeof(*fourier));
if (fourier == (fftw_complex *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
phase=(double *) RelinquishMagickMemory(phase);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(MagickFalse);
}
status=InverseFourier(&fourier_info,magnitude_image,phase_image,fourier,
exception);
if (status != MagickFalse)
status=InverseFourierTransform(&fourier_info,fourier,fourier_image,
exception);
fourier=(fftw_complex *) RelinquishMagickMemory(fourier);
phase=(double *) RelinquishMagickMemory(phase);
magnitude=(double *) RelinquishMagickMemory(magnitude);
return(status);
}
#endif
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickFalse,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flas is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ comporession when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height int a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes(exludes required attributes(e.g. `channels`,
// `compression`, etc)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Meomory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x hight
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifing layer name. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x hight
// Returns negative value and may set error string in `err` when there's an
// error
// When the specified layer name is not found in the EXR file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name, const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(wll be filled when the function returns error code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCEES upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// hight`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Free's internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Free's internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Free's error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occured in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
faiiled
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purpsosely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locates a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts a archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts a archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts a archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts a archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts a archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forwardslash with empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pBut_buf_func: If NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pBut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really over conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large to hold the remaining uncompressed data
// when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's
// where #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#error "No arvhive APIs"
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject files which are too small, and check the first
// 4 bytes of the file to make sure a local header is there.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return MZ_FALSE;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
// No sense in trying to write to an archive that's already at the support max
// size
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
pFilename;
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
¢ral_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse MINIZ_LITTE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
// Read untile NULL(\0).
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfZipCompressor.cpp
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressable run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
// Uncompressable run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the oncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// outSize will be (srcSiz * 3) / 2 at max.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
// Apply EXR-specific? postprocess. Grabbed from OpenEXR's
// ImfRleCompressor.cpp
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierachical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
int len : 8; // code length 0
int lit : 24; // lit p size
int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
// Output a run of runCount instances of the symbol sCount.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
const int ni, // i : input buffer size (in bytes)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in bytes)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
if (nCompressed == 0) {
if (raw->size() != 0) return false;
return false;
}
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be run-able on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
&freq.at(0));
{
if (nBits > 8 * (nCompressed - (ptr - compressed))) {
return false;
}
hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
raw->data());
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header(4byte), then huff data. Initialize length header with zero,
// then later fill it by `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
// Data is not compressed(Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
std::vector<unsigned short> tmpBuffer(tmpBufSize);
hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0f;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
param->rate = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size = dst_width * dst_num_lines * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// address may not be aliged. use byte-wise copy for safety.#76
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
// Corrupsed data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static bool DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy poiner
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
int err_code = TINYEXR_SUCCESS;
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<size_t> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
size_t tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data size.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
size_t data_size =
size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
if (tile_coordinates[3] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data length.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// TODO(LTE): atomic
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto &t : workers) {
t.join();
}
#else
}
#endif
if (err_code != TINYEXR_SUCCESS) {
return err_code;
}
exr_image->num_tiles = static_cast<int>(num_tiles);
} else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large value. Assume this is invalid
// 2**20 = 1048576 = heuristic value.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
std::stringstream ss;
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
tinyexr::SetErrorMessage("data width or data height is negative.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
static void GetLayers(const EXRHeader& exr_header, std::vector<std::string>& layer_names) {
// Naive implementation
// Group channels by layers
// go over all channel names, split by periods
// collect unique names
layer_names.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string full_name(exr_header.channels[c].name);
const size_t pos = full_name.find_last_of('.');
if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
full_name.erase(pos);
if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end())
layer_names.push_back(full_name);
}
}
}
struct LayerChannel {
explicit LayerChannel (size_t i, std::string n)
: index(i)
, name(n)
{}
size_t index;
std::string name;
};
static void ChannelsInLayer(const EXRHeader& exr_header, const std::string layer_name, std::vector<LayerChannel>& channels) {
channels.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(layer_name + '.');
if (pos == std::string::npos)
continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */NULL, err);
}
int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// TODO: Probably limit loading to layers (channels) selected by layer index
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
}
else if (ch.name == "G") {
idxG = int(ch.index);
}
else if (ch.name == "B") {
idxB = int(ch.index);
}
else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
// transfoer `tiled` from version.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// TOOD(LTE): C++11 thread
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if (mem_size == 0) {
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((0 != errcode) || (!fp)) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
}
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return false;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return false;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
// transfoer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
if (version == NULL) {
return TINYEXR_SUCCESS; // May OK
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In multipart image, There is 'part number' before chunk data.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// EXR spec says 'part number' is 'unsigned long' but actually this is
// 'unsigned int(4 bytes)' in OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
// First check 'part number' is identitical to 'i'
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || components == 3 || components == 4) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most of EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION |
memory_bench.c | #include "config.h"
#include <unistd.h>
#include <stdlib.h>
#include <CUnit/Basic.h>
#include <CUnit/Console.h>
#include <sys/time.h>
#include "mkl.h"
static double get_time() {
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + t.tv_usec * 1e-6;
}
static void* host_malloc(size_t size) { return malloc(size); }
static void host_free(void* p) { if (p) free(p); }
static void* gpu_malloc(size_t size) { return mkl_malloc(size, 4096); }
static void gpu_free(void* p) { if (p) mkl_free(p); }
/* invalidate_cpu_l2c - may invalidate CPU-unified L2C */
static void invalidate_cpu_l2c()
{
int i;
const int NLOOPS = 10;
size_t j;
const size_t SIZE = 16 * 1024 * 1024;
int * volatile p = host_malloc(SIZE);
const size_t LEN = SIZE / sizeof(*p);
for (i = 0; i < NLOOPS; i ++) {
p[LEN - 1] = p[0];
for (j = 0; j < LEN - 1; j ++)
p[j] = p[j + 1];
}
host_free(p);
}
static float rand_float_in_range(float from, float to) {
return ((float)rand() / RAND_MAX) * (to - from) + from;
}
const size_t N = 1024 * 1024;
static float* gpu_src_memory = NULL;
static float* host_src_memory = NULL;
static float expected = 0;
static int setup_suite() {
srand(0xDEADBEEF);
gpu_src_memory = (float*)gpu_malloc(N * sizeof(float));
size_t i = 0;
#pragma omp parallel for private(i)
for (i = 0; i < N; ++i) gpu_src_memory[i] = rand_float_in_range(-1.0, 1.0);
host_src_memory = (float*)host_malloc(N*sizeof(float));
memcpy(host_src_memory, gpu_src_memory, N*sizeof(float));
for (i = 0; i < N; ++i)
expected += host_src_memory[i];
return 0;
}
static int teardown_suite() {
gpu_free(gpu_src_memory);
host_free(host_src_memory);
expected = 0;
return 0;
}
#define TEST_MEMCPY(FROM, TO) \
static void test_memcpy_##FROM##_to_##TO() { \
float* TO##_dst_memory = (float*)TO##_malloc(N*sizeof(float)); \
invalidate_cpu_l2c(); \
double start = get_time(); \
memcpy(TO##_dst_memory, FROM##_src_memory, N*sizeof(float)); \
double end = get_time(); \
float actual = 0; \
size_t i = 0; \
for (i = 0; i < N; ++i) { \
actual += TO##_dst_memory[i]; \
} \
printf("\nmemcpy(4MB): %lf sec (%lf)\n", \
end - start, \
N * sizeof(float) / (end - start) / 1024 / 1024 \
); \
CU_ASSERT_DOUBLE_EQUAL(actual, expected, 0.001); \
TO##_free(TO##_dst_memory); \
}
TEST_MEMCPY(host, host);
TEST_MEMCPY(host, gpu);
TEST_MEMCPY(gpu, host);
TEST_MEMCPY(gpu, gpu);
static void suite_memcpy() {
CU_pSuite suite = CU_add_suite(
"memcpy",
setup_suite,
teardown_suite
);
CU_add_test(suite, "Host -> Host", test_memcpy_host_to_host);
CU_add_test(suite, "Host -> GPU", test_memcpy_host_to_gpu);
CU_add_test(suite, "GPU -> Host", test_memcpy_gpu_to_host);
CU_add_test(suite, "GPU -> GPU", test_memcpy_gpu_to_gpu);
}
#define TEST_MEMSET(TO) \
static void test_memset_to_##TO() { \
float* TO##_dst_memory = (float*)TO##_malloc(N*sizeof(float)); \
invalidate_cpu_l2c(); \
double start = get_time(); \
memset(TO##_dst_memory, 0, N*sizeof(float)); \
double end = get_time(); \
float actual = 0; \
size_t i = 0; \
for (i = 0; i < N; ++i) { \
actual += TO##_dst_memory[i]; \
} \
printf("\nmemset(4MB): %lf sec (%lf)\n", \
end - start, \
N * sizeof(float) / (end - start) / 1024 / 1024 \
); \
CU_ASSERT_DOUBLE_EQUAL(actual, 0, 0.001); \
TO##_free(TO##_dst_memory); \
}
TEST_MEMSET(host);
TEST_MEMSET(gpu);
static void suite_memset() {
CU_pSuite suite = CU_add_suite(
"memset",
setup_suite,
teardown_suite
);
CU_add_test(suite, "0 -> Host", test_memset_to_host);
CU_add_test(suite, "0 -> GPU", test_memset_to_gpu);
}
#define TEST_SUMALL(FROM) \
static void test_sumall_##FROM() { \
invalidate_cpu_l2c(); \
double start = get_time(); \
float actual = 0; \
size_t i = 0; \
for (i = 0; i < N; ++i) { \
actual += FROM##_src_memory[i]; \
} \
double end = get_time(); \
printf("\nsumall(4MB): %lf sec (%lf)\n", \
end - start, \
N * sizeof(float) / (end - start) / 1024 / 1024 \
); \
CU_ASSERT_DOUBLE_EQUAL(actual, expected, 0.001); \
}
TEST_SUMALL(host);
TEST_SUMALL(gpu);
static void suite_sumall() {
CU_pSuite suite = CU_add_suite(
"sumall",
setup_suite,
teardown_suite
);
CU_add_test(suite, "Host -> x", test_sumall_host);
CU_add_test(suite, "GPU -> x", test_sumall_gpu);
}
int main() {
CU_initialize_registry();
suite_memcpy();
suite_memset();
suite_sumall();
isatty(fileno(stdout)) ? CU_console_run_tests() : CU_basic_run_tests();
const unsigned int result = CU_get_number_of_failures();
CU_cleanup_registry();
return (result ? 1 : 0);
}
|
rakp_fmt_plug.c | /*
* This software is Copyright (c) 2013 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rakp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rakp);
#else
#include <string.h>
#include "arch.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "RAKP"
#define FORMAT_NAME "IPMI 2.0 RAKP (RMCP+)"
#ifdef SIMD_COEF_32
#define SHA1_N (SIMD_PARA_SHA1 * SIMD_COEF_32)
#endif
#define ALGORITHM_NAME "HMAC-SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 64
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_LENGTH (2 * PAD_SIZE)
#define SALT_ALIGN MEM_ALIGN_NONE
#define SALT_MIN_SIZE (PAD_SIZE - 8)
#define SALT_MAX_SIZE (2 * PAD_SIZE - 8 - 1)
#define FORMAT_TAG "$rakp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SHA1_N
#define MAX_KEYS_PER_CRYPT SHA1_N
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i) & 3)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * 4 * SIMD_COEF_32)
#else
#define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i) & (0xffffffff - 3)) * SIMD_COEF_32 + ((i) & 3) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * 4 * SIMD_COEF_32)
#endif
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"$rakp$a4a3a2a03f0b000094272eb1ba576450b0d98ad10727a9fb0ab83616e099e8bf5f7366c9c03d36a3000000000000000000000000000000001404726f6f74$0ea27d6d5effaa996e5edc855b944e179a2f2434", "calvin"},
{"$rakp$c358d2a72f0c00001135f9b254c274629208b22f1166d94d2eba47f21093e9734355a33593da16f2000000000000000000000000000000001404726f6f74$41fce60acf2885f87fcafdf658d6f97db12639a9", "calvin"},
{"$rakp$b7c2d6f13a43dce2e44ad120a9cd8a13d0ca23f0414275c0bbe1070d2d1299b1c04da0f1a0f1e4e2537300263a2200000000000000000000140768617368636174$472bdabe2d5d4bffd6add7b3ba79a291d104a9ef", "hashcat"},
/* dummy hash for testing long salts */
{"$rakp$787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878$ba4ecc30a0b36a6ba0db862fc95201a81b9252ee", ""},
{NULL}
};
#ifdef SIMD_COEF_32
#define cur_salt rakp_cur_salt
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
JTR_ALIGN(MEM_ALIGN_SIMD) unsigned char cur_salt[2][SHA_BUF_SIZ * 4 * SHA1_N];
static int bufsize;
#else
static struct {
int length;
unsigned char salt[SALT_LENGTH];
} cur_salt;
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char (*opad)[PAD_SIZE];
static SHA_CTX *ipad_ctx;
static SHA_CTX *opad_ctx;
#endif
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;
#define SALT_SIZE sizeof(cur_salt)
#ifdef SIMD_COEF_32
static void clear_keys(void)
{
memset(ipad, 0x36, bufsize);
memset(opad, 0x5C, bufsize);
}
#endif
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
int i;
#endif
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * SHA_BUF_SIZ * 4;
crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt *
BINARY_SIZE,
sizeof(*prep_ipad), MEM_ALIGN_SIMD);
prep_opad = mem_calloc_align(self->params.max_keys_per_crypt *
BINARY_SIZE,
sizeof(*prep_opad), MEM_ALIGN_SIMD);
for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80;
((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (BINARY_SIZE + 64) << 3;
}
clear_keys();
#else
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*ipad_ctx));
opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*opad_ctx));
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
}
static void done(void)
{
MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
MEM_FREE(prep_opad);
MEM_FREE(prep_ipad);
#else
MEM_FREE(opad_ctx);
MEM_FREE(ipad_ctx);
#endif
MEM_FREE(opad);
MEM_FREE(ipad);
MEM_FREE(crypt_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q = NULL;
int len;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = strrchr(ciphertext, '$');
if (!q)
return 0;
q = q + 1;
if ((q - p - 1) > SALT_MAX_SIZE * 2)
return 0;
if ((q - p - 1) < SALT_MIN_SIZE * 2)
return 0;
len = strspn(q, HEXCHARS_lc);
if (len != BINARY_SIZE * 2 || len != strlen(q))
return 0;
if (strspn(p, HEXCHARS_lc) != q - p - 1)
return 0;
return 1;
}
static void set_salt(void *salt)
{
memcpy(&cur_salt, salt, SALT_SIZE);
}
static void set_key(char *key, int index)
{
int len;
#ifdef SIMD_COEF_32
#if ARCH_LITTLE_ENDIAN==1
uint32_t *ipadp = (uint32_t*)&ipad[GETPOS(3, index)];
uint32_t *opadp = (uint32_t*)&opad[GETPOS(3, index)];
#else
uint32_t *ipadp = (uint32_t*)&ipad[GETPOS(0, index)];
uint32_t *opadp = (uint32_t*)&opad[GETPOS(0, index)];
#endif
const uint32_t *keyp = (uint32_t*)key;
unsigned int temp;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
if (len > PAD_SIZE) {
unsigned char k0[BINARY_SIZE];
SHA_CTX ctx;
int i;
SHA1_Init(&ctx);
SHA1_Update(&ctx, key, len);
SHA1_Final(k0, &ctx);
keyp = (unsigned int*)k0;
for (i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
{
#if ARCH_LITTLE_ENDIAN==1
temp = JOHNSWAP(*keyp++);
#else
temp = *keyp++;
#endif
*ipadp ^= temp;
*opadp ^= temp;
}
}
else
#if ARCH_LITTLE_ENDIAN==1
while(((temp = JOHNSWAP(*keyp++)) & 0xff000000))
#else
while(((temp = *keyp++) & 0xff000000))
#endif
{
if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
{
temp &= 0xFFFF0000;
*ipadp ^= temp;
*opadp ^= temp;
break;
}
*ipadp ^= temp;
*opadp ^= temp;
if (!(temp & 0x000000ff))
break;
ipadp += SIMD_COEF_32;
opadp += SIMD_COEF_32;
}
#else
int i;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
memset(ipad[index], 0x36, PAD_SIZE);
memset(opad[index], 0x5C, PAD_SIZE);
if (len > PAD_SIZE) {
SHA_CTX ctx;
unsigned char k0[BINARY_SIZE];
SHA1_Init(&ctx);
SHA1_Update(&ctx, key, len);
SHA1_Final(k0, &ctx);
len = BINARY_SIZE;
for (i = 0; i < len; i++)
{
ipad[index][i] ^= k0[i];
opad[index][i] ^= k0[i];
}
}
else
for (i = 0; i < len; i++)
{
ipad[index][i] ^= key[i];
opad[index][i] ^= key[i];
}
#endif
new_keys = 1;
}
static char *get_key(int index)
{
return saved_plain[index];
}
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
unsigned int x, y = 0;
for (;y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++)
for (x = 0; x < SIMD_COEF_32; x++)
{
// NOTE crypt_key is in input format (4*SHA_BUF_SIZ*SIMD_COEF_32)
if (((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x + y * SIMD_COEF_32 * SHA_BUF_SIZ])
return 1;
}
return 0;
#else
int index = 0;
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
for (index = 0; index < count; index++)
#endif
if (((uint32_t*)binary)[0] == crypt_key[index][0])
return 1;
return 0;
#endif
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
int i;
for (i = 0; i < (BINARY_SIZE/4); i++)
// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
if (((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return (1);
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#if _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
if (new_keys) {
SIMDSHA1body(&ipad[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_ipad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN);
SIMDSHA1body(&opad[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_opad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN);
}
SIMDSHA1body(cur_salt[0],
(unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_ipad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
SIMDSHA1body(cur_salt[1],
(unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT);
SIMDSHA1body(&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
(unsigned int*)&prep_opad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
#else
SHA_CTX ctx;
if (new_keys) {
SHA1_Init(&ipad_ctx[index]);
SHA1_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
SHA1_Init(&opad_ctx[index]);
SHA1_Update(&opad_ctx[index], opad[index], PAD_SIZE);
}
memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
SHA1_Update(&ctx, cur_salt.salt, cur_salt.length);
SHA1_Final((unsigned char*) crypt_key[index], &ctx);
memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
SHA1_Update(&ctx, crypt_key[index], BINARY_SIZE);
SHA1_Final((unsigned char*) crypt_key[index], &ctx);
#endif
}
new_keys = 0;
return count;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
#if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN==1
alter_endianity(out, BINARY_SIZE);
#endif
return out;
}
static void *get_salt(char *ciphertext)
{
static unsigned char salt[SALT_LENGTH];
unsigned int i, len;
#ifdef SIMD_COEF_32
unsigned int j;
#endif
memset(salt, 0, sizeof(salt));
memset(&cur_salt, 0, sizeof(cur_salt));
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
len = (strrchr(ciphertext, '$') - ciphertext) / 2;
for (i = 0; i < len; i++)
salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
#ifdef SIMD_COEF_32
for (i = 0; i < len; i++)
for (j = 0; j < SHA1_N; ++j)
cur_salt[i>>6][GETPOS(i & 63, j)] = ((unsigned char*)salt)[i];
for (i = 0; i < SHA1_N; ++i)
cur_salt[len>>6][GETPOS(len & 63, i)] = 0x80;
for (j = len + 1; j < SALT_LENGTH; ++j)
for (i = 0; i < SHA1_N; ++i)
cur_salt[j>>6][GETPOS(j & 63, i)] = 0;
for (i = 0; i < SHA1_N; ++i)
((unsigned int*)cur_salt[1])[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (len + 64) << 3;
return &cur_salt;
#else
cur_salt.length = len;
memcpy(cur_salt.salt, salt, len);
return &cur_salt;
#endif
}
struct fmt_main fmt_rakp = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
#ifdef SIMD_COEF_32
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
int main(int argc, char **argv) {
//seed random number generator
// Q2b: get the number of threads to run with from agrv and
// add OpenMP API code to set number of threads here
int Nthreads = atoi(argv[1]);
omp_set_num_threads(Nthreads);
struct drand48_data *drandData;
drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
// Q2c: add an OpenMP parallel region here, wherein each thread initializes
// one entry in drandData using srand48_r and seed based on thread number
#pragma omp parallel
{
int rank = omp_get_thread_num();
int size = omp_get_num_threads();
drandData[rank] = (struct) rank;
long int seed = rank;
srand48_r(seed, drandData+rank);
}
long long int Ntrials = 10000000;
//need running tallies
long long int Ntotal=0;
long long int Ncircle=0;
#pragma omp parallel for reduction(+:Ncircle)
for (long long int n=0; n<Ntrials; n++) {
double rand1;
double rand2;
int rank = omp_get_thread_num();
//gererate two random numbers (use the thread id to offset drandData)
drand48_r(drandData+rank, &rand1);
drand48_r(drandData+rank, &rand2);
double x = -1 + 2*rand1; //shift to [-1,1]
double y = -1 + 2*rand2;
//check if its in the circle
if (sqrt(x*x+y*y)<=1) Ncircle++;
Ntotal++;
if (n%100 ==0) {
double pi = 4.0*Ncircle/ (double) (n);
printf("Our estimate of pi is %g \n", pi);
}
}
double pi = 4.0*Ncircle/ (double) (Ntotal);
printf("Our final estimate of pi is %g \n", pi);
free(drandData);
return 0;
}
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file ordering_op-inl.h
* \brief Function definition of ordering operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <string>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
#include "../../api/operator/op_utils.h"
namespace mshadow {
template <typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
Shape<dst_dim> target_shape) {
CHECK_EQ(src.CheckContiguous(), true);
return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
}; // namespace mshadow
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType { kReturnValue, kReturnIndices, kReturnMask, kReturnBoth };
} // namespace topk_enum
struct TopKParam : public dmlc::Parameter<TopKParam> {
dmlc::optional<int> axis;
int k;
int ret_typ;
bool is_ascend;
int dtype;
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>(-1))
.describe(
"Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(k).set_default(1).describe(
"Number of top elements to select,"
" should be always smaller than or equal to the element number in the given axis."
" A global sort is performed if set k < 1.");
DMLC_DECLARE_FIELD(ret_typ)
.set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe(
"The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.");
DMLC_DECLARE_FIELD(is_ascend).set_default(false).describe(
"Whether to choose k largest or k smallest elements."
" Top K largest elements will be chosen if set to false.");
DMLC_DECLARE_FIELD(dtype)
// TODO(srivrohi): remove support for real data type in mxnet-2.0
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("int64", mshadow::kInt64)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe(
"DType of the output indices when ret_typ is \"indices\" or \"both\". "
"An error will be raised if the selected data type cannot precisely represent the "
"indices.");
}
std::string ReturnType2String(int ret_typ) {
switch (ret_typ) {
case topk_enum::kReturnValue:
return "value";
case topk_enum::kReturnIndices:
return "indices";
case topk_enum::kReturnMask:
return "mask";
case topk_enum::kReturnBoth:
return "both";
default:
LOG(FATAL) << "Unknown return type enum " << ret_typ;
}
LOG(FATAL) << "should not reach here ";
return "";
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, k_s, ret_typ_s, is_ascend_s, dtype_s;
axis_s << axis;
k_s << k;
dtype_s << dtype;
ret_typ_s << ret_typ;
is_ascend_s << is_ascend;
(*dict)["axis"] = axis_s.str();
(*dict)["k"] = k_s.str();
(*dict)["ret_typ"] = ReturnType2String(ret_typ);
(*dict)["is_ascend"] = is_ascend_s.str();
(*dict)["dtype"] = MXNetTypeWithBool2String(dtype);
}
};
struct SortParam : public dmlc::Parameter<SortParam> {
dmlc::optional<int> axis;
bool is_ascend;
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>(-1))
.describe(
"Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true).describe(
"Whether to sort in ascending or descending order.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, is_ascend_s;
axis_s << axis;
is_ascend_s << is_ascend;
(*dict)["axis"] = axis_s.str();
(*dict)["is_ascend_s"] = is_ascend_s.str();
}
};
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
dmlc::optional<int> axis;
bool is_ascend;
int dtype;
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>(-1))
.describe(
"Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true).describe(
"Whether to sort in ascending or descending order.");
DMLC_DECLARE_FIELD(dtype)
// TODO(srivrohi): remove support for real data type in mxnet-2.0
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("int64", mshadow::kInt64)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe(
"DType of the output indices. It is only valid when ret_typ is \"indices\" or"
" \"both\". An error will be raised if the selected data type cannot precisely "
"represent the indices.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, is_ascend_s, dtype_s;
axis_s << axis;
is_ascend_s << is_ascend;
dtype_s << dtype;
(*dict)["axis"] = axis_s.str();
(*dict)["is_ascend_s"] = is_ascend_s.str();
(*dict)["dtype"] = MXNetTypeWithBool2String(dtype);
}
};
template <typename IDXType = index_t>
inline void ParseTopKParam(const TShape& src_shape,
const TopKParam& param,
TShape* target_shape,
size_t* batch_size,
IDXType* element_num,
int* axis,
IDXType* k,
bool* do_transpose,
bool* is_ascend) {
*do_transpose = false;
*k = param.k;
*is_ascend = param.is_ascend;
// get batch_size, axis and element_num
if (!static_cast<bool>(param.axis)) { // No axis given
*axis = 0;
*batch_size = 1;
*element_num = src_shape.Size();
} else {
*axis = param.axis.value();
if (*axis < 0) {
*axis += src_shape.ndim();
}
CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
<< "Invalid axis! axis should be between 0 and " << src_shape.ndim()
<< ", found axis=" << *axis;
if (src_shape[*axis] != 0) {
*batch_size = src_shape.Size() / src_shape[*axis];
}
*element_num = src_shape[*axis];
if (*axis != src_shape.ndim() - 1) {
*do_transpose = true;
}
}
// get k
if (param.k <= 0) {
*k = *element_num;
}
// get target_shape
if (!static_cast<bool>(param.axis)) {
if (param.ret_typ != topk_enum::kReturnMask) {
*target_shape = mshadow::Shape1(*k);
} else {
*target_shape = src_shape;
}
} else {
*target_shape = src_shape;
if (param.ret_typ != topk_enum::kReturnMask) {
(*target_shape)[*axis] = *k;
}
}
CHECK(*k >= 0 && *k <= *element_num)
<< "k must be smaller than " << *element_num << ", get k = " << *k;
}
using namespace mshadow;
struct fill_ind_to_one {
template <typename DType, typename IDXType>
MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, DType* out) {
out[indices[i]] = static_cast<DType>(1);
}
};
struct fill_ind {
template <typename DType, typename IDXType>
MSHADOW_XINLINE static void Map(index_t i,
const IDXType* indices,
const DType* val,
int req,
DType* out) {
KERNEL_ASSIGN(out[indices[i]], req, val[i]);
}
};
template <typename DType, typename IDXType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
const Tensor<cpu, 1, IDXType>& ind,
const Tensor<cpu, 1, char>& work,
IDXType K,
IDXType N,
bool is_ascend,
Stream<cpu>* s) {
// Use full sort when K is relatively large.
const bool full_sort(K * 8 > N);
// Batch size.
const size_t M(work.size(0) / (sizeof(DType) * N));
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(M); ++i) {
// Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
DType* vals = reinterpret_cast<DType*>(work.dptr_);
DType* sorted_vals = dat.dptr_ + i * N;
IDXType* indices = ind.dptr_ + i * N;
if (is_ascend) {
if (full_sort) {
std::sort(indices, indices + N, [&](const IDXType& i1, const IDXType& i2) {
return vals[i1] < vals[i2];
});
} else {
std::partial_sort(
indices, indices + K, indices + N, [&](const IDXType& i1, const IDXType& i2) {
return vals[i1] < vals[i2];
});
}
} else {
if (full_sort) {
std::sort(indices, indices + N, [&](const IDXType& i1, const IDXType& i2) {
return vals[i1] > vals[i2];
});
} else {
std::partial_sort(
indices, indices + K, indices + N, [&](const IDXType& i1, const IDXType& i2) {
return vals[i1] > vals[i2];
});
}
}
for (IDXType j = 0; j < K; ++j) {
sorted_vals[j] = vals[indices[j]];
}
}
}
#ifdef __CUDACC__
template <typename DType, typename IDXType>
MSHADOW_XINLINE bool TopKCompare(DType val1,
IDXType ind1,
DType val2,
IDXType ind2,
bool is_ascend) {
// Negative indices denote undefined values which are considered arbitrary small resp. large.
return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}
template <typename DType, typename IDXType>
MSHADOW_XINLINE void MergeTopK(IDXType K,
DType* val1,
IDXType* ind1,
DType* val2,
IDXType* ind2,
bool is_ascend) {
// In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
// [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
IDXType i1(K - 1), i2(K - 1);
for (IDXType i = 0; i < K; ++i) {
if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
--i2;
} else {
--i1;
}
}
// Now merge the lists from back to front.
for (IDXType i = K; i--;) {
if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) {
val1[i] = val1[i1];
ind1[i] = ind1[i1];
--i1;
} else {
val1[i] = val2[i2];
ind1[i] = ind2[i2];
--i2;
}
}
}
template <typename DType, typename IDXType>
__global__ void PartialSortSmallK(IDXType K, IDXType N, DType* val, IDXType* ind, bool is_ascend) {
// Buffer for pairwise reduction.
extern __shared__ __align__(sizeof(IDXType)) unsigned char temp_smem[];
IDXType* buff = reinterpret_cast<IDXType*>(temp_smem);
// Start of buffer sections associated with this thread.
const IDXType offset(threadIdx.x * K);
IDXType* ind_buff = reinterpret_cast<IDXType*>(&buff[offset]);
DType* val_buff = reinterpret_cast<DType*>(&buff[blockDim.x * K]) + offset;
// Initialize top-K values for this thread.
for (IDXType i = 0; i < K; ++i) {
ind_buff[i] = -1;
}
// Range of values this thread cares about. Each thread block processes
// a different batch item (i.e. a different set of ind/val where we
// have to select the top-K elements). All threads within the same
// block work on the same batch item.
const IDXType first(blockIdx.x * N + threadIdx.x), last((blockIdx.x + 1) * N);
// Select top-K from this range and store it sorted in the buffer.
// We assume a small K, so linear insertion is o.k.
for (IDXType i = first; i < last; i += blockDim.x) {
DType cur_val(val[i]);
IDXType cur_ind(ind[i]);
for (IDXType j = K;
j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend);) {
if (j + 1 < K) {
val_buff[j + 1] = val_buff[j];
ind_buff[j + 1] = ind_buff[j];
}
val_buff[j] = cur_val;
ind_buff[j] = cur_ind;
}
}
// Recursive merge of sorted lists for this thread block. Note that blockDim.x is not
// necessary a power of two, therefore the additional checks for last_s.
for (IDXType s = (blockDim.x + 1) / 2, last_s = blockDim.x; last_s > 1;
last_s = s, s = (s + 1) / 2) {
__syncthreads();
if (threadIdx.x < s && threadIdx.x + s < last_s) {
MergeTopK(K, val_buff, ind_buff, val_buff + s * K, ind_buff + s * K, is_ascend);
}
}
// Final updates on master thread.
if (threadIdx.x == 0) {
for (IDXType i = 0; i < K; ++i) {
ind[blockIdx.x * N + i] = ind_buff[i];
val[blockIdx.x * N + i] = val_buff[i];
}
}
}
template <typename DType, typename IDXType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
const Tensor<gpu, 1, IDXType>& ind,
const Tensor<gpu, 1, char>& work,
IDXType K,
IDXType N,
bool is_ascend,
Stream<gpu>* s) {
// Use full sort for all but very small K for which we
// can do a partial sort entirely within shared memory.
const bool full_sort(K > 5);
// Batch size.
const size_t M(dat.size(0) / N);
if (full_sort) {
// Divide workspace into two parts. The first one is needed to store batch ids.
size_t alignment = std::max(sizeof(DType), sizeof(IDXType));
size_t id_size = PadBytes(sizeof(IDXType) * ind.size(0), alignment);
Tensor<gpu, 1, IDXType> batch_id(
reinterpret_cast<IDXType*>(work.dptr_), Shape1(ind.size(0)), s);
Tensor<gpu, 1, char> sort_work(work.dptr_ + id_size, Shape1(work.size(0) - id_size), s);
mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
if (M > 1) {
// Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
}
} else {
const IDXType nthreads(mshadow::cuda::kBaseThreadNum);
PartialSortSmallK<<<M,
nthreads,
nthreads * K*(sizeof(IDXType) + sizeof(DType)),
mshadow::Stream<gpu>::GetStream(s)>>>(
K, N, dat.dptr_, ind.dptr_, is_ascend);
}
}
#endif
/*!
* \brief Implementation of the TopK operation
*
*
* \param ctx the running context
* \param resource temporary resource handler
* \param src the Source blob
* \param ret the destination blobs
* \param param the topk parameters
* \tparam xpu the device type.
* \tparam DType type of the output value/mask.
* \tparam IDType type of the output indices.
*/
template <typename xpu, typename DType, typename IDType, typename IDXType>
void TopKImpl(const RunContext& ctx,
const Resource& resource,
const std::vector<OpReqType>& req,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param) {
using namespace mshadow;
using namespace mshadow::expr;
// 0. If input shape is 0-shape, directly return
if (src.Size() == 0)
return;
// 1. Parse and initialize information
Stream<xpu>* s = ctx.get_stream<xpu>();
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, DType> sorted_dat;
Tensor<xpu, 1, IDXType> indices, sel_indices;
size_t batch_size = 0;
IDXType element_num = 0; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
IDXType k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(IDXType));
mxnet::TShape target_shape;
ParseTopKParam(src.shape_,
param,
&target_shape,
&batch_size,
&element_num,
&axis,
&k,
&do_transpose,
&is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDXType>())
<< "'index_t' does not have a sufficient precision to represent "
<< "the indices of the input array. The total element_num is " << element_num
<< ", but the selected index_t can only represent "
<< mxnet::common::MaxIntegerValue<IDXType>() << " elements";
Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
// Temp space needed by the full sorts.
size_t temp_size = std::max(mxnet::op::SortByKeyWorkspaceSize<IDXType, DType, xpu>(src.Size()),
mxnet::op::SortByKeyWorkspaceSize<DType, IDXType, xpu>(src.Size()));
temp_size =
std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<IDXType, IDXType, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += PadBytes(sizeof(IDXType) * src.Size(), alignment);
// Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(DType) * src.Size());
size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment) +
PadBytes(sizeof(IDXType) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += PadBytes(sizeof(IDXType) * batch_size * k, alignment);
}
workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
char* workspace_curr_ptr = workspace.dptr_;
sorted_dat = Tensor<xpu, 1, DType>(
reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s); // contain sorted dat
workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
indices = Tensor<xpu, 1, IDXType>(reinterpret_cast<IDXType*>(workspace_curr_ptr),
Shape1(src.Size()),
s); // indices in the original matrix
workspace_curr_ptr += PadBytes(sizeof(IDXType) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, IDXType>(
reinterpret_cast<IDXType*>(workspace_curr_ptr), Shape1(batch_size * k), s);
workspace_curr_ptr += PadBytes(sizeof(IDXType) * batch_size * k, alignment);
CHECK_EQ(sel_indices.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
Tensor<xpu, 1, DType> flattened_data;
if (do_transpose) {
flattened_data = Tensor<xpu, 1, DType>(
reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s);
workspace_curr_ptr += sizeof(DType) * src.Size();
flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
CHECK_EQ(flattened_data.CheckContiguous(), true);
} else {
flattened_data = src.FlatTo1D<xpu, DType>(s);
}
// `temp_workspace` stores the flattened data
temp_workspace = Tensor<xpu, 1, char>(
reinterpret_cast<char*>(flattened_data.dptr_), Shape1(sizeof(DType) * src.Size()), s);
CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(s,
batch_size * element_num,
1,
IDXType{0},
IDXType{1},
kWriteTo,
reinterpret_cast<IDXType*>(indices.dptr_));
CHECK_EQ(indices.CheckContiguous(), true);
// 2. Perform inplace batch sort.
// After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
// up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
// `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
// a temporal buffer for GPU device.
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob
// When returning indices, only update(modulo) required elements instead of full elements
// to avoid redundant calculation.
// Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
// is large enough.
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
ret_mask = scalar<DType>(0);
sel_indices = reshape(slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(
sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1));
}
if (req[0] == kNullOp) {
return;
} else if (req[0] == kWriteTo) {
mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(
s, batch_size * k, sel_indices.dptr_, ret_mask.dptr_);
} else {
LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
}
} else if (param.ret_typ == topk_enum::kReturnIndices) {
if (do_transpose) {
Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(
ret_indices,
req[0],
tcast<IDType>(F<mshadow_op::mod>(
transpose(
slice<2>(inplace_reshape(
indices,
Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)),
0,
k),
Shape3(0, 2, 1)),
element_num)));
} else {
Tensor<xpu, 2, IDType> ret_indices =
ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_indices,
req[0],
tcast<IDType>(F<mshadow_op::mod>(
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
} else {
if (do_transpose) {
Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(
ret_value,
req[0],
transpose(slice<2>(inplace_reshape(
sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0,
k),
Shape3(0, 2, 1)));
ASSIGN_DISPATCH(
ret_indices,
req[1],
tcast<IDType>(F<mshadow_op::mod>(
transpose(
slice<2>(inplace_reshape(
indices,
Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)),
0,
k),
Shape3(0, 2, 1)),
element_num)));
} else {
Tensor<xpu, 2, DType> ret_value =
ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
Tensor<xpu, 2, IDType> ret_indices =
ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_value,
req[0],
slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
ASSIGN_DISPATCH(ret_indices,
req[1],
tcast<IDType>(F<mshadow_op::mod>(
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
}
}
template <typename xpu, typename DType>
size_t TopKWorkspaceSize(const TBlob& src, const TopKParam& param, size_t* temp_size_ptr) {
using namespace mshadow;
using namespace mshadow::expr;
size_t batch_size = 0;
size_t temp_size;
index_t element_num = 0; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(index_t));
mxnet::TShape target_shape;
ParseTopKParam(src.shape_,
param,
&target_shape,
&batch_size,
&element_num,
&axis,
&k,
&do_transpose,
&is_ascend);
// Temp space needed by the full sorts.
temp_size = std::max(mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
temp_size =
std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
// Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(DType) * src.Size());
*temp_size_ptr = temp_size;
size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment) +
PadBytes(sizeof(index_t) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
}
return workspace_size;
}
template <typename xpu, typename DType, typename IDType>
void TopKImplwithWorkspace(const RunContext& ctx,
const std::vector<OpReqType>& req,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param,
char* workspace_curr_ptr,
const size_t& temp_size,
Stream<xpu>* s) {
using namespace mshadow;
using namespace mshadow::expr;
// 0. If input shape is 0-shape, directly return
if (src.Size() == 0)
return;
// 1. Parse and initialize information
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, DType> sorted_dat;
Tensor<xpu, 1, index_t> indices, sel_indices;
size_t batch_size = 0;
index_t element_num = 0; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(index_t));
mxnet::TShape target_shape;
ParseTopKParam(src.shape_,
param,
&target_shape,
&batch_size,
&element_num,
&axis,
&k,
&do_transpose,
&is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
<< "'index_t' does not have a sufficient precision to represent "
<< "the indices of the input array. The total element_num is " << element_num
<< ", but the selected index_t can only represent "
<< mxnet::common::MaxIntegerValue<index_t>() << " elements";
Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
sorted_dat = Tensor<xpu, 1, DType>(
reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s); // contain sorted dat
workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
Shape1(src.Size()),
s); // indices in the original matrix
workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, index_t>(
reinterpret_cast<index_t*>(workspace_curr_ptr), Shape1(batch_size * k), s);
workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
CHECK_EQ(sel_indices.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
Tensor<xpu, 1, DType> flattened_data;
if (do_transpose) {
flattened_data = Tensor<xpu, 1, DType>(
reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s);
workspace_curr_ptr += sizeof(DType) * src.Size();
flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
CHECK_EQ(flattened_data.CheckContiguous(), true);
} else {
flattened_data = src.FlatTo1D<xpu, DType>(s);
}
// `temp_workspace` stores the flattened data
temp_workspace = Tensor<xpu, 1, char>(
reinterpret_cast<char*>(flattened_data.dptr_), Shape1(sizeof(DType) * src.Size()), s);
CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(
s, batch_size * element_num, 1, index_t{0}, index_t{1}, kWriteTo, indices.dptr_);
CHECK_EQ(indices.CheckContiguous(), true);
// 2. Perform inplace batch sort.
// After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
// up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
// `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
// a temporal buffer for GPU device.
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob
// When returning indices, only update(modulo) required elements instead of full elements
// to avoid redundant calculation.
// Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
// is large enough.
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
ret_mask = scalar<DType>(0);
sel_indices = reshape(slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(
sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1));
}
if (req[0] == kNullOp) {
return;
} else if (req[0] == kWriteTo) {
mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(
s, batch_size * k, sel_indices.dptr_, ret_mask.dptr_);
} else {
LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
}
} else if (param.ret_typ == topk_enum::kReturnIndices) {
if (do_transpose) {
Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(
ret_indices,
req[0],
tcast<IDType>(F<mshadow_op::mod>(
transpose(
slice<2>(inplace_reshape(
indices,
Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)),
0,
k),
Shape3(0, 2, 1)),
element_num)));
} else {
Tensor<xpu, 2, IDType> ret_indices =
ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_indices,
req[0],
tcast<IDType>(F<mshadow_op::mod>(
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
} else {
if (do_transpose) {
Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(
ret_value,
req[0],
transpose(slice<2>(inplace_reshape(
sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0,
k),
Shape3(0, 2, 1)));
ASSIGN_DISPATCH(
ret_indices,
req[1],
tcast<IDType>(F<mshadow_op::mod>(
transpose(
slice<2>(inplace_reshape(
indices,
Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)),
0,
k),
Shape3(0, 2, 1)),
element_num)));
} else {
Tensor<xpu, 2, DType> ret_value =
ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
Tensor<xpu, 2, IDType> ret_indices =
ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_value,
req[0],
slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
ASSIGN_DISPATCH(ret_indices,
req[1],
tcast<IDType>(F<mshadow_op::mod>(
slice<1>(inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
}
}
template <typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_NO_BFLOAT16_TYPE_SWITCH(param.dtype, IDType, {
if (inputs[0].Size() >= INT_MAX) {
TopKImpl<xpu, DType, IDType, index_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
} else {
TopKImpl<xpu, DType, IDType, int32_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
}
});
});
} else {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].Size() >= INT_MAX) {
TopKImpl<xpu, DType, index_t, index_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
} else {
TopKImpl<xpu, DType, index_t, int32_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
}
});
}
}
template <typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].Size() >= INT_MAX) {
TopKImpl<xpu, DType, index_t, index_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
} else {
TopKImpl<xpu, DType, index_t, int32_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
}
});
}
template <typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.dtype = param.dtype;
topk_param.ret_typ = topk_enum::kReturnIndices;
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
if (inputs[0].Size() >= INT_MAX) {
TopKImpl<xpu, DType, IDType, index_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
} else {
TopKImpl<xpu, DType, IDType, int32_t>(
ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
}
});
});
}
template <typename xpu, typename DType, typename IDType, typename IDXType>
void TopKBackwardImpl(const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const TopKParam& param) {
CHECK_NE(req[0], kWriteInplace);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu>* s = ctx.run_ctx.get_stream<xpu>();
CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
size_t batch_size = 0;
IDXType element_num = 0; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
IDXType k = 0;
mxnet::TShape target_shape;
ParseTopKParam(outputs[0].shape_,
param,
&target_shape,
&batch_size,
&element_num,
&axis,
&k,
&do_transpose,
&is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDXType>())
<< "'IDType' does not have a sufficient precision to represent "
<< "the indices of the input array. The total element_num is " << element_num
<< ", but the selected index_t can only represent "
<< mxnet::common::MaxIntegerValue<IDXType>() << " elements";
Tensor<xpu, 1, IDXType> workspace =
ctx.requested[0].get_space_typed<xpu, 1, IDXType>(Shape1(batch_size * k + batch_size), s);
Tensor<xpu, 1, IDXType> sel_indices =
Tensor<xpu, 1, IDXType>(workspace.dptr_, Shape1(batch_size * k), s);
Tensor<xpu, 1, IDXType> batch_shift =
Tensor<xpu, 1, IDXType>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
Tensor<xpu, 2, DType> out_grad =
inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
Tensor<xpu, 2, DType> in_grad =
outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
mxnet_op::Kernel<range_fwd, xpu>::Launch(
s, batch_size, 1, IDXType{0}, element_num, kWriteTo, batch_shift.dptr_);
if (do_transpose) {
Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
sel_indices = reshape(
transpose(broadcast_to(inplace_reshape(batch_shift, Shape3(src_shape[0], src_shape[2], 1)),
mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))),
Shape3(0, 2, 1)),
Shape1(batch_size * k));
sel_indices += tcast<IDXType>(indices);
sel_indices = transpose_indices(
sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1));
} else {
Tensor<xpu, 2, IDType> indices =
inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
sel_indices = reshape(
tcast<IDXType>(indices) + broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
mxnet::TShape(Shape2(batch_size, k))),
Shape1(batch_size * k));
}
CHECK_EQ(sel_indices.CheckContiguous(), true);
if (kWriteTo == req[0] || kAddTo == req[0]) {
if (kWriteTo == req[0]) {
in_grad = scalar<DType>(0);
}
mxnet_op::Kernel<fill_ind, xpu>::Launch(
s, batch_size * k, sel_indices.dptr_, out_grad.dptr_, req[0], in_grad.dptr_);
} else {
LOG(FATAL) << "Not Implemented!";
}
}
template <typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
if (inputs[0].Size() >= INT_MAX) {
TopKBackwardImpl<xpu, DType, IDType, index_t>(ctx, inputs, req, outputs, param);
} else {
TopKBackwardImpl<xpu, DType, IDType, int32_t>(ctx, inputs, req, outputs, param);
}
});
});
} else if (param.ret_typ == topk_enum::kReturnValue) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].Size() >= INT_MAX) {
TopKBackwardImpl<xpu, DType, index_t, index_t>(ctx, inputs, req, outputs, param);
} else {
TopKBackwardImpl<xpu, DType, index_t, int32_t>(ctx, inputs, req, outputs, param);
}
});
} else {
LOG(FATAL) << "Not Implemented";
}
}
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) {
return static_cast<uint32_t>(1);
} else {
return static_cast<uint32_t>(2);
}
}
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
return static_cast<uint32_t>(2);
} else {
return static_cast<uint32_t>(1);
}
}
inline bool TopKType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
size_t in_size = in_attrs->size();
size_t out_size = out_attrs->size();
CHECK_EQ(in_size, 1);
CHECK(out_size == 1 || out_size == 2);
// out_attr[0] -> stores value
// out_attr[1] -> stores indices
if (out_size > 1) {
if (param.ret_typ == topk_enum::kReturnValue) {
#if MXNET_USE_INT64_TENSOR_SIZE == 1
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
<< "Failed to set the type of ret_indices.";
} else {
CHECK(type_assign(&(*out_attrs)[1], param.dtype)) << "Failed to set the type of ret_indices.";
}
}
if (param.ret_typ == topk_enum::kReturnIndices) {
CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices.";
} else {
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
return true;
}
inline bool TopKShapeImpl(const TopKParam& param,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) {
CHECK_EQ(out_attrs->size(), 1U);
} else {
CHECK_EQ(out_attrs->size(), 2U);
}
mxnet::TShape& in_shape = (*in_attrs)[0];
size_t batch_size = 0;
index_t element_num = 0; // number of batches + the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
index_t k = 0;
mxnet::TShape target_shape;
ParseTopKParam(in_shape,
param,
&target_shape,
&batch_size,
&element_num,
&axis,
&k,
&do_transpose,
&is_ascend);
if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
}
return true;
}
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
return TopKShapeImpl(param, in_attrs, out_attrs);
}
inline bool SortType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
int data_type = -1;
size_t in_size = in_attrs->size();
size_t out_size = out_attrs->size();
CHECK_EQ(in_size, 1);
CHECK_EQ(out_size, 2);
#if MXNET_USE_INT64_TENSOR_SIZE == 1
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
<< "Failed to set the type of ret_indices";
CHECK(type_assign(&data_type, (*in_attrs)[0]))
<< "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0];
CHECK(type_assign(&data_type, (*out_attrs)[0]))
<< "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0];
CHECK(type_assign(&(*in_attrs)[0], data_type))
<< "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0];
CHECK(type_assign(&(*out_attrs)[0], data_type))
<< "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0];
if (data_type == -1)
return false;
return true;
}
inline bool SortShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices.";
return true;
}
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnIndices;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
utils.c | /* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "splittable_mrg.h"
#include "graph_generator.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef __MTA__
#include <sys/mta_task.h>
#endif
#ifdef GRAPH_GENERATOR_MPI
#include <mpi.h>
#endif
#ifdef GRAPH_GENERATOR_OMP
#include <omp.h>
#endif
#include "utils.h"
#if defined(_OPENMP)
#define OMP(x_) _Pragma(x_)
#else
#define OMP(x_)
#endif
#if defined(HAVE_LIBNUMA)
#include <numa.h>
static int numa_inited = 0;
static int numa_avail = -1;
void *
xmalloc (size_t sz)
{
void * out;
if (!numa_inited) {
OMP("omp critical") {
numa_inited = 1;
numa_avail = numa_available ();
}
}
if (numa_avail)
out = numa_alloc (sz);
else
out = malloc (sz);
if (!out) {
fprintf(stderr, "Out of memory trying to allocate %zu byte(s)\n", sz);
abort ();
}
return out;
}
void *
xcalloc (size_t n, size_t sz)
{
void * out;
if (!numa_inited) {
OMP("omp critical") {
numa_inited = 1;
numa_avail = numa_available ();
}
}
if (numa_avail) {
size_t to_alloc;
to_alloc = n * sz;
if (to_alloc < n || to_alloc < sz) {
fprintf(stderr,
"Allocation size out of range for %zu items of %zu byte(s)\n",
n, sz);
abort ();
}
out = numa_alloc (n * sz);
#if defined(_OPENMP)
#pragma omp parallel for
for (size_t k = 0; k < n; ++k)
memset (out + k * sz, 0, sz);
#else
memset (out, 0, n * sz);
#endif
} else
out = calloc (n, sz);
if (!out) {
fprintf(stderr,
"Out of memory trying to allocate/clear %zu items of %zu byte(s)\n",
n, sz);
abort ();
}
return out;
}
void
xfree (void * p, size_t sz)
{
if (!p) return;
if (numa_avail >= 0)
numa_free (p, sz);
else
free (p);
}
#else
void *
xmalloc (size_t sz)
{
void * out;
out = malloc (sz);
if (!out) {
fprintf(stderr, "Out of memory trying to allocate %zu byte(s)\n", sz);
abort ();
}
return out;
}
void *
xcalloc (size_t n, size_t sz)
{
void * out;
out = calloc (n, sz);
if (!out) {
fprintf(stderr,
"Out of memory trying to allocate/clear %zu items of %zu byte(s)\n",
n, sz);
abort ();
}
return out;
}
void
xfree (void * p, size_t sz)
{
free (p);
}
#endif
/* Spread the two 64-bit numbers into five nonzero values in the correct
* range. */
void make_mrg_seed(uint64_t userseed1, uint64_t userseed2, uint_fast32_t* seed) {
seed[0] = (uint32_t)(userseed1 & UINT32_C(0x3FFFFFFF)) + 1;
seed[1] = (uint32_t)((userseed1 >> 30) & UINT32_C(0x3FFFFFFF)) + 1;
seed[2] = (uint32_t)(userseed2 & UINT32_C(0x3FFFFFFF)) + 1;
seed[3] = (uint32_t)((userseed2 >> 30) & UINT32_C(0x3FFFFFFF)) + 1;
seed[4] = (uint32_t)((userseed2 >> 60) << 4) + (uint32_t)(userseed1 >> 60) + 1;
}
|
conv3x3s1_winograd64_neon5_BdB.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd64_neon5_BdB(const Mat& bottom_blob, Mat& top_blob, const Option& opt,
int inch, int outw, int outh, int outch)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
// BEGIN transform input
Mat bottom_blob_tm = top_blob;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
bottom_blob_tm.create(1, 64 * tiles, inch, 4u);
// bottom_blob_tm.create(inch, tiles, 64);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w*2;
const float* r3 = r0 + w*3;
// the assembly block for armv7 input transform requires 13 general registers
// old gcc may fail to allocate register on debug build without -fomit-frame-pointer
// so, fallback to intrinsic version for armv7 debug build --- nihui
#if __aarch64__ || !defined(NDEBUG)
for (int m=0; m+3<8; m+=4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0+4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1+4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2+4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3+4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w*4;
r1 += w*4;
r2 += w*4;
r3 += w*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2+4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3);
t0 += 8*4;
t1 += 8*4;
t2 += 8*4;
t3 += 8*4;
r0_tm0 += img0_tm.w*tiles*25;
r0_tm1 += img0_tm.w*tiles*25;
r0_tm2 += img0_tm.w*tiles*25;
r0_tm3 += img0_tm.w*tiles*25;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*24);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles*32);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*40);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*48);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*56);
int step = img0_tm.w*tiles*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%0], %26 \n"
"vst1.f32 {d6[1]}, [%1], %26 \n"
"vst1.f32 {d7[0]}, [%2], %26 \n"
"vst1.f32 {d7[1]}, [%3], %26 \n"
"vst1.f32 {d12[0]}, [%0] \n"
"vst1.f32 {d12[1]}, [%1] \n"
"vst1.f32 {d13[0]}, [%2] \n"
"vst1.f32 {d13[1]}, [%3] \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%4], %26 \n"
"vst1.f32 {d6[1]}, [%5], %26 \n"
"vst1.f32 {d7[0]}, [%6], %26 \n"
"vst1.f32 {d7[1]}, [%7], %26 \n"
"vst1.f32 {d12[0]}, [%4] \n"
"vst1.f32 {d12[1]}, [%5] \n"
"vst1.f32 {d13[0]}, [%6] \n"
"vst1.f32 {d13[1]}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm1_0), // %1
"=r"(r0_tm2_0), // %2
"=r"(r0_tm3_0), // %3
"=r"(r0_tm0_4), // %4
"=r"(r0_tm1_4), // %5
"=r"(r0_tm2_4), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm1_0),
"2"(r0_tm2_0),
"3"(r0_tm3_0),
"4"(r0_tm0_4),
"5"(r0_tm1_4),
"6"(r0_tm2_4),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm_1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm_2 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm_3 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm_5 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm_6 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm_7 = img0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_1[0] = tmp12a + tmp12b;
r0_tm_2[0] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_3[0] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_5[0] = tmp56a + tmp56b;
r0_tm_6[0] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 8;
r0_tm_1 += img0_tm.w * tiles * 8;
r0_tm_2 += img0_tm.w * tiles * 8;
r0_tm_3 += img0_tm.w * tiles * 8;
r0_tm_4 += img0_tm.w * tiles * 8;
r0_tm_5 += img0_tm.w * tiles * 8;
r0_tm_6 += img0_tm.w * tiles * 8;
r0_tm_7 += img0_tm.w * tiles * 8;
}
#endif // __ARM_NEON
}
}
}
}
}
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 32;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(32*t2-Nz-508,512)),ceild(32*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(16*t1+Nx+29,512)),floord(32*t2+Nx+28,512)),floord(32*t3+Nx+28,512)),floord(32*t1-32*t2+Nz+Nx+27,512));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),512*t4+510),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(512*t4,t5+1);
ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
spmmd_x_csr_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSR *matA, const ALPHA_SPMAT_CSR *matB, ALPHA_Number *matC, const ALPHA_INT ldc)
{
if (matA->cols != matB->rows || ldc < matA->rows)
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT m = matA->rows;
for(ALPHA_INT i = 0; i < matA->rows; i++)
for(ALPHA_INT j = 0; j < matB->cols; j++)
{
alpha_setzero(matC[index2(j, i, ldc)]);
}
ALPHA_INT num_thread = alpha_get_thread_num();
ALPHA_INT64 *flop = (ALPHA_INT64 *)alpha_malloc(matB->cols * sizeof(ALPHA_INT64));
memset(flop, '\0', m * sizeof(ALPHA_INT64));
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT ar = 0; ar < m; ar++)
{
for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
{
ALPHA_INT br = matA->col_indx[ai];
flop[ar] += matB->rows_end[br] - matB->rows_start[br];
}
}
for (ALPHA_INT i = 1; i < m; i++)
{
flop[i] += flop[i - 1];
}
ALPHA_INT partition[num_thread + 1];
balanced_partition_row_by_flop(flop, m, num_thread, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_thread)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT local_m_s = partition[tid];
ALPHA_INT local_m_e = partition[tid + 1];
for (ALPHA_INT ar = local_m_s; ar < local_m_e; ar++)
{
for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++)
{
ALPHA_INT br = matA->col_indx[ai];
ALPHA_Number av = matA->values[ai];
for (ALPHA_INT bi = matB->rows_start[br]; bi < matB->rows_end[br]; bi++)
{
ALPHA_INT bc = matB->col_indx[bi];
ALPHA_Number bv = matB->values[bi];
alpha_madde(matC[index2(bc,ar,ldc)], av, bv);
}
}
}
}
alpha_free(flop);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
tinyrenderer.h | /**
Tiny Renderer, https://github.com/ssloy/tinyrenderer
Copyright Dmitry V. Sokolov
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include <OgreVector.h>
#include <OgreMatrix4.h>
namespace Ogre {
typedef Vector<2, float> vec2;
typedef Vector<3, float> vec3;
typedef Vector<4, float> vec4;
typedef Vector<3, uchar> vec3b;
typedef Vector<4, uchar> vec4b;
typedef Matrix3 mat3;
typedef Matrix4 mat4;
static vec3 barycentric(const vec2 tri[3], const vec2& P) {
mat3 ABC(tri[0].x, tri[1].x, tri[2].x,
tri[0].y, tri[1].y, tri[2].y,
1, 1, 1);
//if (ABC.determinant()<1e-6) return vec3(-1,1,1); // for a degenerate triangle generate negative coordinates, it will be thrown away by the rasterizator
return ABC.inverse() * vec3(P.x, P.y, 1);
}
static float cross(const vec2 &v1, const vec2 &v2) {
return v1.x * v2.y - v1.y * v2.x;
}
/// triangle screen coordinates before persp. division
static void triangle(const mat4& Viewport, const vec4 clip_verts[3], IShader& shader, Image& image,
Image& zbuffer, bool depthCheck, bool depthWrite, bool blendAdd, bool doCull)
{
vec4 pts[3] = { Viewport*clip_verts[0], Viewport*clip_verts[1], Viewport*clip_verts[2] }; // triangle screen coordinates before persp. division
for (int i = 0; i < 3; i++)
{
float w = pts[i][3];
pts[i] /= w;
pts[i][3] = 1 / w;
}
vec2 pts2[3] = { pts[0].xy(), pts[1].xy(), pts[2].xy() }; // triangle screen coordinates after perps. division
if(doCull && cross(pts2[2] - pts2[0], pts2[2] - pts2[1]) > 0)
return; // culled
vec2 bboxmin( std::numeric_limits<float>::max(), std::numeric_limits<float>::max());
vec2 bboxmax(-std::numeric_limits<float>::max(), -std::numeric_limits<float>::max());
vec2 clamp(image.getWidth()-1, image.getHeight()-1);
for (int i=0; i<3; i++)
for (int j=0; j<2; j++) {
bboxmin[j] = std::max(0.f, std::min(bboxmin[j], pts2[i][j]));
bboxmax[j] = std::min(clamp[j], std::max(bboxmax[j], pts2[i][j]));
}
#pragma omp parallel for
for (int x=(int)bboxmin.x; x<=(int)bboxmax.x; x++) {
for (int y=(int)bboxmin.y; y<=(int)bboxmax.y; y++) {
vec3 bc_screen = barycentric(pts2, vec2(x, y));
vec3 bc_clip = vec3(bc_screen.x*pts[0][3], bc_screen.y*pts[1][3], bc_screen.z*pts[2][3]);
bc_clip = bc_clip/(bc_clip.x+bc_clip.y+bc_clip.z); // check https://github.com/ssloy/tinyrenderer/wiki/Technical-difficulties-linear-interpolation-with-perspective-deformations
float frag_depth = vec3(pts[0][2], pts[1][2], pts[2][2]).dotProduct(bc_clip);
if (bc_screen.x<0 || bc_screen.y<0 || bc_screen.z<0) continue;
if (frag_depth < 0.0)
continue;
if(depthCheck && frag_depth > *zbuffer.getData<float>(x, y))
continue;
ColourValue fragColour;
bool discard = shader.fragment(bc_clip, fragColour);
if (discard) continue;
auto& dst = *image.getData<vec3b>(x, y);
if(blendAdd)
fragColour += ColourValue(vec4b(dst[0], dst[1], dst[2], 0).ptr());
fragColour.saturate();
fragColour *= 255;
dst = vec3b(fragColour.ptr());
if (depthWrite)
*zbuffer.getData<float>(x, y) = frag_depth;
}
}
}
} |
copyPfloatToDfloat.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C" void FUNC(copyPfloatToDfloat) (const dlong & N,
const pfloat * __restrict__ y,
dfloat * __restrict__ x){
#ifdef __NEKRS__OMP__
#pragma omp parallel for
#endif
for(dlong n=0;n<N;++n){
x[n]=y[n];
}
}
|
GB_binop__land_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_bool)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__land_bool)
// A.*B function (eWiseMult): GB (_AemultB_03__land_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_bool)
// A*D function (colscale): GB (_AxD__land_bool)
// D*A function (rowscale): GB (_DxB__land_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__land_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__land_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_bool)
// C=scalar+B GB (_bind1st__land_bool)
// C=scalar+B' GB (_bind1st_tran__land_bool)
// C=A+scalar GB (_bind2nd__land_bool)
// C=A'+scalar GB (_bind2nd_tran__land_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij && bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x && y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__land_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__land_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__land_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
bool bij = Bx [p] ;
Cx [p] = (x && bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = Ax [p] ;
Cx [p] = (aij && y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x && aij) ; \
}
GrB_Info GB (_bind1st_tran__land_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij && y) ; \
}
GrB_Info GB (_bind2nd_tran__land_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R RN A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 4296.0
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
% ExceptionInfo *excetion)
%
% A description of each parameter follows:
%
% o ConvertPathToPolygon() returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
if (polygon_info->edges[i].points != (PointInfo *) NULL)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
EdgeInfo
*p;
ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
ExceptionInfo *exception)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo *) NULL);
}
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
points=(PointInfo *) RelinquishMagickMemory(points);
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
polygon_info->number_edges=edge;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
points=(PointInfo *) RelinquishMagickMemory(points);
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
polygon_info->number_edges=edge+1;
points=(PointInfo *) NULL;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
polygon_info->number_edges=edge;
}
}
polygon_info->number_edges=edge;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
polygon_info->number_edges,sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
EdgeInfo
*edge_info;
edge_info=polygon_info->edges+i;
edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
edge_info->number_points,sizeof(*edge_info->points));
if (edge_info->points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPrimitiveToPath() returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PathInfo *) NULL);
}
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the p point as open if the subpath is not closed.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=CastDoubleToLong(ceil(edge.y1-0.5));
stop=CastDoubleToLong(floor(edge.y2+0.5));
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
ssize_t
x;
Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
if (status == MagickFalse)
continue;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
double
dx,
dy;
ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (double) (MaxBezierCoordinates >> 2))
continue;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
PixelInfo
composite,
pixel;
Quantum
*magick_restrict q;
ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
(y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
(y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
(y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat*PerceptibleReciprocal(gradient->radius);
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const double pad)
{
double
extent;
size_t
quantum;
/*
Check if there is enough storage for drawing pimitives.
*/
quantum=sizeof(**mvg_info->primitive_info);
extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*quantum;
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
if (extent == (double) CastDoubleToLong(extent))
{
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) (extent+1),quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++)
{
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
(*mvg_info->primitive_info)[i].text=(char *) NULL;
}
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t) (
(PrimitiveExtentPad+1)*quantum));
(void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)*
quantum));
*mvg_info->extent=1;
mvg_info->offset=0;
return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
char **magick_restrict sentinal)
{
char
**magick_restrict q;
double
value;
q=sentinal;
value=InterpretLocaleValue(string,q);
sentinal=q;
return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=GetDrawValue(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
const char
*p;
ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=(size_t) PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(number_points+1),sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) (number_points+1)*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((graphic_context[n]->render != MagickFalse) &&
(mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
region;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
region.x=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
region.y=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
region.width=(size_t) CastDoubleToLong(floor(GetDrawValue(
token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
region.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) region.width,(double)
region.height,(double) region.x,(double) region.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong(
floor(GetDrawValue(token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong(
floor(GetDrawValue(token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,(double) number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,(double) number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine(
&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(double)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#00000000",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo **) NULL);
}
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
polygon_info[0]=ConvertPathToPolygon(path_info,exception);
if (polygon_info[0] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
for (i=1; i < (ssize_t) number_threads; i++)
{
EdgeInfo
*edge_info;
ssize_t
j;
polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
sizeof(*polygon_info[i]));
if (polygon_info[i] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
polygon_info[i]->number_edges=0;
edge_info=polygon_info[0]->edges;
polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
polygon_info[0]->number_edges,sizeof(*edge_info));
if (polygon_info[i]->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges,edge_info,
polygon_info[0]->number_edges*sizeof(*edge_info));
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
polygon_info[i]->edges[j].points=(PointInfo *) NULL;
polygon_info[i]->number_edges=polygon_info[0]->number_edges;
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
{
edge_info=polygon_info[0]->edges+j;
polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
edge_info->number_points,sizeof(*edge_info));
if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
edge_info->number_points*sizeof(*edge_info->points));
}
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
assert(edge < (ssize_t) polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < (ssize_t) polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
const PointInfo
*q;
EdgeInfo
*p;
ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
EdgeInfo
*p;
ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info,exception);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
if (IsStringTrue(artifact) != MagickFalse)
(void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
(y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
ssize_t
i,
x;
ssize_t
coordinates,
y;
x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status&=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
status&=SetImageInfo(clone_info,0,exception);
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
if (clone_info->size != (char *) NULL)
clone_info->size=DestroyString(clone_info->size);
if (clone_info->extract != (char *) NULL)
clone_info->extract=DestroyString(clone_info->extract);
if ((LocaleCompare(clone_info->magick,"file") == 0) ||
(LocaleCompare(clone_info->magick,"https") == 0) ||
(LocaleCompare(clone_info->magick,"http") == 0) ||
(LocaleCompare(clone_info->magick,"mpri") == 0) ||
(IsPathAccessible(clone_info->filename) != MagickFalse))
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine,exception);
else
status&=CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
point_x,
point_y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info..
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permuation of the (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
double
cosine,
sine;
PrimitiveInfo
*primitive_info;
PrimitiveInfo
*p;
ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
MagickPI+MagickEpsilon)))));
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
PrimitiveInfo
*p;
ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) MAGICK_SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) MAGICK_SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
PrimitiveInfo
*p;
ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
PrimitiveInfo
*q;
ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(-1);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
PrimitiveInfo
*p;
ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
PrimitiveInfo
*p;
ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
double
dx,
dy;
ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((pad_p) > MaxBezierCoordinates) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((pad_q) > MaxBezierCoordinates) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
(void) ThrowMagickException(exception,GetMagickModule(), \
ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
return((PrimitiveInfo *) NULL); \
} \
}
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
DisableMSCWarning(4127)
CheckPathExtent(MaxStrokePad,MaxStrokePad);
RestoreMSCWarning
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.
q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid))))));
DisableMSCWarning(4127)
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
RestoreMSCWarning
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p-
theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid)))))));
DisableMSCWarning(4127)
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
RestoreMSCWarning
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
mask.h | // This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// All rights reserved.
#ifndef MASK_H
#define MASK_H
#include <omp.h>
/**
*
* Function to apply a 3x3 mask to an image
*
*/
void
mask3x3 (const float *input, //input image
float *output, //output image
const int nx, //image width
const int ny, //image height
const float *mask //mask to be applied
)
{
//apply the mask to the center body of the image
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
for (int j = 1; j < nx - 1; j++)
{
double sum = 0;
for (int l = 0; l < 3; l++)
{
for (int m = 0; m < 3; m++)
{
int p = (i + l - 1) * nx + j + m - 1;
sum += input[p] * mask[l * 3 + m];
}
}
int k = i * nx + j;
output[k] = sum;
}
}
//apply the mask to the first and last rows
#pragma omp parallel for
for (int j = 1; j < nx - 1; j++)
{
double sum = 0;
sum += input[j - 1] * (mask[0] + mask[3]);
sum += input[j] * (mask[1] + mask[4]);
sum += input[j + 1] * (mask[2] + mask[5]);
sum += input[nx + j - 1] * mask[6];
sum += input[nx + j] * mask[7];
sum += input[nx + j + 1] * mask[8];
output[j] = sum;
sum = 0;
sum += input[(ny - 2) * nx + j - 1] * mask[0];
sum += input[(ny - 2) * nx + j] * mask[1];
sum += input[(ny - 2) * nx + j + 1] * mask[2];
sum += input[(ny - 1) * nx + j - 1] * (mask[6] + mask[3]);
sum += input[(ny - 1) * nx + j] * (mask[7] + mask[4]);
sum += input[(ny - 1) * nx + j + 1] * (mask[8] + mask[5]);
output[(ny - 1) * nx + j] = sum;
}
//apply the mask to the first and last columns
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
double sum = 0;
sum += input[(i - 1) * nx] * (mask[0] + mask[1]);
sum += input[(i - 1) * nx + 1] * mask[2];
sum += input[i * nx] * (mask[3] + mask[4]);
sum += input[i * nx + 1] * mask[5];
sum += input[(i + 1) * nx] * (mask[6] + mask[7]);
sum += input[(i + 1) * nx + 1] * mask[8];
output[i * nx] = sum;
sum = 0;
sum += input[i * nx - 2] * mask[0];
sum += input[i * nx - 1] * (mask[1] + mask[2]);
sum += input[(i + 1) * nx - 2] * mask[3];
sum += input[(i + 1) * nx - 1] * (mask[4] + mask[5]);
sum += input[(i + 2) * nx - 2] * mask[6];
sum += input[(i + 2) * nx - 1] * (mask[7] + mask[8]);
output[i * nx + nx - 1] = sum;
}
//apply the mask to the four corners
output[0] = input[0] * (mask[0] + mask[1] + mask[3] + mask[4]) +
input[1] * (mask[2] + mask[5]) +
input[nx] * (mask[6] + mask[7]) + input[nx + 1] * mask[8];
output[nx - 1] =
input[nx - 2] * (mask[0] + mask[3]) +
input[nx - 1] * (mask[1] + mask[2] + mask[4] + mask[5]) +
input[2 * nx - 2] * mask[6] + input[2 * nx - 1] * (mask[7] + mask[8]);
output[(ny - 1) * nx] =
input[(ny - 2) * nx] * (mask[0] + mask[1]) +
input[(ny - 2) * nx + 1] * mask[2] +
input[(ny - 1) * nx] * (mask[3] + mask[4] + mask[6] + mask[7]) +
input[(ny - 1) * nx + 1] * (mask[5] + mask[8]);
output[ny * nx - 1] =
input[(ny - 1) * nx - 2] * mask[0] +
input[(ny - 1) * nx - 1] * (mask[1] + mask[2]) +
input[ny * nx - 2] * (mask[3] + mask[6]) +
input[ny * nx - 1] * (mask[4] + mask[5] + mask[7] + mask[8]);
}
/**
*
* Compute the second order X derivative
*
*/
void
Dxx (const float *I, //input image
float *Ixx, //oputput derivative
const int nx, //image width
const int ny //image height
)
{
//mask of second derivative
float M[] = { 0., 0., 0.,
1., -2., 1.,
0., 0., 0.
};
//computing the second derivative
mask3x3 (I, Ixx, nx, ny, M);
}
/**
*
* Compute the second order Y derivative
*
*/
void
Dyy (const float *I, //input image
float *Iyy, //oputput derivative
const int nx, //image width
const int ny //image height
)
{
//mask of second derivative
float M[] = { 0., 1., 0.,
0., -2., 0.,
0., 1., 0.
};
//computing the second derivative
mask3x3 (I, Iyy, nx, ny, M);
}
/**
*
* Compute the second order XY derivative
*
*/
void
Dxy (const float *I, //input image
float *Ixy, //oputput derivative
const int nx, //image width
const int ny //image height
)
{
//mask of second derivative
float M[] = { 1. / 4., 0., -1. / 4.,
0., 0., 0.,
-1. / 4., 0., 1. / 4.
};
//computing the second derivative
mask3x3 (I, Ixy, nx, ny, M);
}
/**
*
* Compute the gradient with central differences
*
*/
void
gradient (const float *input, //input image
float *dx, //computed x derivative
float *dy, //computed y derivative
const int nx, //image width
const int ny //image height
)
{
//gradient in the center body of the image
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
for (int j = 1; j < nx - 1; j++)
{
const int k = i * nx + j;
dx[k] = 0.5 * (input[k + 1] - input[k - 1]);
dy[k] = 0.5 * (input[k + nx] - input[k - nx]);
}
}
//gradient in the first and last rows
#pragma omp parallel for
for (int j = 1; j < nx - 1; j++)
{
dx[j] = 0.5 * (input[j + 1] - input[j - 1]);
dy[j] = 0.5 * (input[j + nx] - input[j]);
const int k = (ny - 1) * nx + j;
dx[k] = 0.5 * (input[k + 1] - input[k - 1]);
dy[k] = 0.5 * (input[k] - input[k - nx]);
}
//gradient in the first and last columns
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
const int p = i * nx;
dx[p] = 0.5 * (input[p + 1] - input[p]);
dy[p] = 0.5 * (input[p + nx] - input[p - nx]);
const int k = (i + 1) * nx - 1;
dx[k] = 0.5 * (input[k] - input[k - 1]);
dy[k] = 0.5 * (input[k + nx] - input[k - nx]);
}
//calculate the gradient in the corners
dx[0] = 0.5 * (input[1] - input[0]);
dy[0] = 0.5 * (input[nx] - input[0]);
dx[nx - 1] = 0.5 * (input[nx - 1] - input[nx - 2]);
dy[nx - 1] = 0.5 * (input[2 * nx - 1] - input[nx - 1]);
dx[(ny - 1) * nx] =
0.5 * (input[(ny - 1) * nx + 1] - input[(ny - 1) * nx]);
dy[(ny - 1) * nx] = 0.5 * (input[(ny - 1) * nx] - input[(ny - 2) * nx]);
dx[ny * nx - 1] = 0.5 * (input[ny * nx - 1] - input[ny * nx - 1 - 1]);
dy[ny * nx - 1] = 0.5 * (input[ny * nx - 1] - input[(ny - 1) * nx - 1]);
}
/**
*
* Compute the coefficients of the divergence term
*
*/
void
psi_divergence (const float *psi, //robust functional
float *psi1, //coefficients of divergence
float *psi2, //coefficients of divergence
float *psi3, //coefficients of divergence
float *psi4, //coefficients of divergence
const int nx, //image width
const int ny //image height
)
{
//calculate coefficients in the center body of the image
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++){
for (int j = 1; j < nx - 1; j++){
const int k = i * nx + j;
psi1[k] = 0.5 * (psi[k + nx] + psi[k]);
psi2[k] = 0.5 * (psi[k - nx] + psi[k]);
psi3[k] = 0.5 * (psi[k + 1] + psi[k]);
psi4[k] = 0.5 * (psi[k - 1] + psi[k]);
}
}
//calculate coefficients in the first and last rows
#pragma omp parallel for
for (int j = 1; j < nx - 1; j++)
{
psi1[j] = 0.5 * (psi[j + nx] + psi[j]);
psi2[j] = 0;
psi3[j] = 0.5 * (psi[j + 1] + psi[j]);
psi4[j] = 0.5 * (psi[j - 1] + psi[j]);
const int k = (ny - 1) * nx + j;
psi1[k] = 0;
psi2[k] = 0.5 * (psi[k - nx] + psi[k]);
psi3[k] = 0.5 * (psi[k + 1] + psi[k]);
psi4[k] = 0.5 * (psi[k - 1] + psi[k]);
}
//calculate coefficients in the first and last columns
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
const int k = i * nx;
psi1[k] = 0.5 * (psi[k + nx] + psi[k]);
psi2[k] = 0.5 * (psi[k - nx] + psi[k]);
psi3[k] = 0.5 * (psi[k + 1] + psi[k]);
psi4[k] = 0;
const int j = (i + 1) * nx - 1;
psi1[j] = 0.5 * (psi[j + nx] + psi[j]);
psi2[j] = 0.5 * (psi[j - nx] + psi[j]);
psi3[j] = 0;
psi4[j] = 0.5 * (psi[j - 1] + psi[j]);
}
//up-left corner (0,0)
psi1[0] = 0.5 * (psi[nx] + psi[0]);
psi3[0] = 0.5 * (psi[1] + psi[0]);
psi2[0] = psi4[0] = 0;
//up-right corner (nx,0)
psi1[nx - 1] = 0.5 * (psi[nx - 1 + nx] + psi[nx - 1]);
psi4[nx - 1] = 0.5 * (psi[nx - 2] + psi[nx - 1]);
psi2[nx - 1] = psi3[nx - 1] = 0;
//bottom-left corner (0,ny)
psi2[(ny - 1) * nx] = 0.5 * (psi[(ny - 2) * nx] + psi[(ny - 1) * nx]);
psi3[(ny - 1) * nx] = 0.5 * (psi[(ny - 1) * nx + 1] + psi[(ny - 1) * nx]);
psi1[(ny - 1) * nx] = psi4[(ny - 1) * nx] = 0;
//bottom-right corner (nx,ny)
psi2[ny * nx - 1] = 0.5 * (psi[ny * nx - 1 - nx] + psi[ny * nx - 1]);
psi4[ny * nx - 1] = 0.5 * (psi[ny * nx - 2] + psi[ny * nx - 1]);
psi1[ny * nx - 1] = psi3[ny * nx - 1] = 0;
}
/**
*
* Compute the divergence of the optical flow
*
*/
void
divergence_u (const float *u, //x component of optical flow
const float *v, //y component of optical flow
const float *psi1, //coefficients of divergence
const float *psi2, //coefficients of divergence
const float *psi3, //coefficients of divergence
const float *psi4, //coefficients of divergence
float *div_u, //computed divergence for u
float *div_v, //computed divergence for v
const int nx, //image width
const int ny //image height
)
{
//calculate the divergence in the center body of the image
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
for (int j = 1; j < nx - 1; j++)
{
const int k = i * nx + j;
div_u[k] =
psi1[k] * (u[k + nx] - u[k]) + psi2[k] * (u[k - nx] - u[k]) +
psi3[k] * (u[k + 1] - u[k]) + psi4[k] * (u[k - 1] - u[k]);
div_v[k] =
psi1[k] * (v[k + nx] - v[k]) + psi2[k] * (v[k - nx] - v[k]) +
psi3[k] * (v[k + 1] - v[k]) + psi4[k] * (v[k - 1] - v[k]);
}
}
//calculate the divergence in the first and last rows
#pragma omp parallel for
for (int j = 1; j < nx - 1; j++)
{
div_u[j] =
psi1[j] * (u[j + nx] - u[j]) + psi3[j] * (u[j + 1] - u[j]) +
psi4[j] * (u[j - 1] - u[j]);
div_v[j] =
psi1[j] * (v[j + nx] - v[j]) + psi3[j] * (v[j + 1] - v[j]) +
psi4[j] * (v[j - 1] - v[j]);
const int k = (ny - 1) * nx + j;
div_u[k] =
psi2[k] * (u[k - nx] - u[k]) + psi3[k] * (u[k + 1] - u[k]) +
psi4[k] * (u[k - 1] - u[k]);
div_v[k] =
psi2[k] * (v[k - nx] - v[k]) + psi3[k] * (v[k + 1] - v[k]) +
psi4[k] * (v[k - 1] - v[k]);
}
//calculate the divergence in the first and last columns
#pragma omp parallel for
for (int i = 1; i < ny - 1; i++)
{
const int k = i * nx;
div_u[k] =
psi1[k] * (u[k + nx] - u[k]) + psi2[k] * (u[k - nx] - u[k]) +
psi3[k] * (u[k + 1] - u[k]);
div_v[k] =
psi1[k] * (v[k + nx] - v[k]) + psi2[k] * (v[k - nx] - v[k]) +
psi3[k] * (v[k + 1] - v[k]);
const int j = (i + 1) * nx - 1;
div_u[j] =
psi1[j] * (u[j + nx] - u[j]) + psi2[j] * (u[j - nx] - u[j]) +
psi4[j] * (u[j - 1] - u[j]);
div_v[j] =
psi1[j] * (v[j + nx] - v[j]) + psi2[j] * (v[j - nx] - v[j]) +
psi4[j] * (v[j - 1] - v[j]);
}
//up-left corner (0,0)
div_u[0] = psi1[0] * (u[nx] - u[0]) + psi3[0] * (u[1] - u[0]);
div_v[0] = psi1[0] * (v[nx] - v[0]) + psi3[0] * (v[1] - v[0]);
//up-right corner (nx,0)
div_u[nx - 1] =
psi1[nx - 1] * (u[nx - 1 + nx] - u[nx - 1]) + psi4[nx - 1] * (u[nx - 2] -
u[nx - 1]);
div_v[nx - 1] =
psi1[nx - 1] * (v[nx - 1 + nx] - v[nx - 1]) + psi4[nx - 1] * (v[nx - 2] -
v[nx - 1]);
//bottom-left corner (0,ny)
div_u[(ny - 1) * nx] =
psi2[(ny - 1) * nx] * (u[(ny - 2) * nx] - u[(ny - 1) * nx]) +
psi3[(ny - 1) * nx] * (u[(ny - 1) * nx + 1] - u[(ny - 1) * nx]);
div_v[(ny - 1) * nx] =
psi2[(ny - 1) * nx] * (v[(ny - 2) * nx] - v[(ny - 1) * nx]) +
psi3[(ny - 1) * nx] * (v[(ny - 1) * nx + 1] - v[(ny - 1) * nx]);
//bottom-right corner (nx,ny)
div_u[ny * nx - 1] =
psi2[ny * nx - 1] * (u[ny * nx - 1 - nx] - u[ny * nx - 1]) +
psi4[ny * nx - 1] * (u[ny * nx - 2] - u[ny * nx - 1]);
div_v[ny * nx - 1] =
psi2[ny * nx - 1] * (v[ny * nx - 1 - nx] - v[ny * nx - 1]) +
psi4[ny * nx - 1] * (v[ny * nx - 2] - v[ny * nx - 1]);
}
#endif
|
toimg.c | /* Copyright 2013-2015 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu>
* 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu>
*/
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <strings.h>
#include <complex.h>
#include <stdbool.h>
#include "num/multind.h"
#include "num/init.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/mmio.h"
#include "misc/png.h"
#include "misc/dicom.h"
#ifndef DIMS
#define DIMS 16
#endif
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
static const char usage_str[] = "[-h] <input> <output_prefix>";
static const char help_str[] = "Create magnitude images as png or proto-dicom.\n"
"The first two non-singleton dimensions will\n"
"be used for the image, and the other dimensions\n"
"will be looped over.\n";
static void toimg(bool dicom, const char* name, long inum, float max, long h, long w, const complex float* data)
{
int len = strlen(name);
assert(len >= 1);
int nr_bytes = dicom ? 2 : 3;
unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]);
float max_val = dicom ? 65535. : 255.;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
unsigned int value = max_val * (cabsf(data[j * h + i]) / max);
if (!dicom) {
(*buf)[i][j][0] = value;
(*buf)[i][j][1] = value;
(*buf)[i][j][2] = value;
} else {
(*buf)[i][j][0] = (value >> 0) & 0xFF;
(*buf)[i][j][2] = (value >> 8) & 0xFF;
}
}
}
(dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]);
free(buf);
}
static void toimg_stack(const char* name, bool dicom, const long dims[DIMS], const complex float* data)
{
long data_size = md_calc_size(DIMS, dims);
long sq_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
int l = 0;
for (int i = 0; i < DIMS; i++)
if (1 != dims[i])
sq_dims[l++] = dims[i];
float max = 0.;
for (long i = 0; i < data_size; i++)
max = MAX(cabsf(data[i]), max);
if (0. == max)
max = 1.;
int len = strlen(name);
assert(len >= 1);
long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2);
long img_size = md_calc_size(2, sq_dims);
debug_printf(DP_INFO, "Writing %d image(s)...", num_imgs);
#pragma omp parallel for
for (long i = 0; i < num_imgs; i++) {
char name_i[len + 10]; // extra space for ".0000.png"
if (num_imgs > 1)
sprintf(name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png");
else
sprintf(name_i, "%s.%s", name, dicom ? "dcm" : "png");
toimg(dicom, name_i, i, max, sq_dims[0], sq_dims[1], data + i * img_size);
}
debug_printf(DP_INFO, "done.\n", num_imgs);
}
int main_toimg(int argc, char* argv[])
{
bool dicom = mini_cmdline_bool(argc, argv, 'd', 2, usage_str, help_str);
num_init();
// -d option is deprecated
char* ext = rindex(argv[2], '.');
if (NULL != ext) {
assert(!dicom);
if (0 == strcmp(ext, ".dcm"))
dicom = true;
else
if (0 != strcmp(ext, ".png"))
error("Unknown file extension.");
*ext = '\0';
}
long dims[DIMS];
complex float* data = load_cfl(argv[1], DIMS, dims);
toimg_stack(argv[2], dicom, dims, data);
unmap_cfl(DIMS, dims, data);
exit(0);
}
|
edgelist.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_EDGELIST_H_
#define SRC_EDGELIST_H_
template <typename T>
struct edge_t {
edge_t() {}
edge_t(int _src, int _dst, T _val)
{
src = _src;
dst = _dst;
val = _val;
}
int src;
int dst;
T val;
};
template <typename T>
struct edgelist_t {
edge_t<T>* edges;
int m;
int n;
int nnz;
edgelist_t() : m(0), n(0), nnz(0) {}
edgelist_t(int _m, int _n, int _nnz)
{
m = _m;
n = _n;
nnz = _nnz;
edges = (edge_t<T>*)_mm_malloc(nnz * sizeof(edge_t<T>), 64);
}
};
template <typename T>
struct tedge_t {
int src;
int dst;
int tile_id;
T val;
};
template <typename T>
void load_edgelist(const char* dir, int myrank, int nrank,
edgelist_t<T>* edgelist) {
edgelist->nnz = 0;
for(int i = global_myrank ; ; i += global_nrank)
{
std::stringstream fname_ss;
fname_ss << dir << i;
//printf("Opening file: %s\n", fname_ss.str().c_str());
FILE* fp = fopen(fname_ss.str().c_str(), "r");
if(!fp) break;
int tmp_[3];
fread(tmp_, sizeof(int), 3, fp);
edgelist->m = tmp_[0];
edgelist->n = tmp_[1];
edgelist->nnz += tmp_[2];
fclose(fp);
}
MPI_Bcast(&(edgelist->m), 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&(edgelist->n), 1, MPI_INT, 0, MPI_COMM_WORLD);
std::cout << "Got: " << edgelist->nnz << " edges\n" << std::endl;
edgelist->edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)edgelist->nnz * (uint64_t)sizeof(edge_t<T>), 64));
int nnzcnt = 0;
for(int i = global_myrank ; ; i += global_nrank)
{
std::stringstream fname_ss;
fname_ss << dir << i;
//printf("Opening file: %s\n", fname_ss.str().c_str());
FILE* fp = fopen(fname_ss.str().c_str(), "r");
if(!fp) break;
int tmp_[3];
fread(tmp_, sizeof(int), 3, fp);
assert(tmp_[0] == edgelist->m);
assert(tmp_[1] == edgelist->n);
fread(edgelist->edges + nnzcnt, sizeof(edge_t<T>), tmp_[2], fp);
#ifdef __DEBUG
for(int j = 0 ; j < tmp_[2] ; j++)
{
if(edgelist->edges[nnzcnt].src <= 0 ||
edgelist->edges[nnzcnt].dst <= 0 ||
edgelist->edges[nnzcnt].src > edgelist->m ||
edgelist->edges[nnzcnt].dst > edgelist->n)
{
std::cout << "Invalid edge, i, j, nnz: " << i << " , " << j << " , " << nnzcnt << std::endl;
exit(0);
}
nnzcnt++;
}
#else
nnzcnt += tmp_[2];
#endif
fclose(fp);
}
}
template <typename T>
void write_edgelist_txt(const char* dir, int myrank, int nrank,
const edgelist_t<T> & edgelist)
{
std::stringstream fname_ss;
fname_ss << dir << global_myrank;
printf("Opening file: %s\n", fname_ss.str().c_str());
FILE * fp = fopen(fname_ss.str().c_str(), "w");
fprintf(fp, "%d %d %u\n", edgelist.m, edgelist.n, edgelist.nnz);
for(int i = 0 ; i < edgelist.nnz ; i++)
{
fprintf(fp, "%d %d %.15e\n", edgelist.edges[i].src, edgelist.edges[i].dst, edgelist.edges[i].val);
}
fclose(fp);
}
template <typename T>
void load_edgelist_txt(const char* dir, int myrank, int nrank,
edgelist_t<T>* edgelist) {
edgelist->nnz = 0;
for(int i = global_myrank ; ; i += global_nrank)
{
std::stringstream fname_ss;
fname_ss << dir << i;
//printf("Opening file: %s\n", fname_ss.str().c_str());
FILE* fp = fopen(fname_ss.str().c_str(), "r");
if(!fp) break;
int tmp_[3];
fscanf(fp, "%d %d %u", &(tmp_[0]), &(tmp_[1]), &(tmp_[2]));
edgelist->m = tmp_[0];
edgelist->n = tmp_[1];
edgelist->nnz += tmp_[2];
fclose(fp);
}
std::cout << "Got: " << edgelist->nnz << " edges\n" << std::endl;
edgelist->edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)edgelist->nnz * (uint64_t)sizeof(edge_t<T>), 64));
int nnzcnt = 0;
for(int i = global_myrank ; ; i += global_nrank)
{
std::stringstream fname_ss;
fname_ss << dir << i;
//printf("Opening file: %s\n", fname_ss.str().c_str());
FILE* fp = fopen(fname_ss.str().c_str(), "r");
if(!fp) break;
int tmp_[3];
fscanf(fp, "%d %d %u", &(tmp_[0]), &(tmp_[1]), &(tmp_[2]));
assert(tmp_[0] == edgelist->m);
assert(tmp_[1] == edgelist->n);
for(int j = 0 ; j < tmp_[2] ; j++)
{
if(!fscanf(fp, "%d %d %lf", &(edgelist->edges[nnzcnt].src),
&(edgelist->edges[nnzcnt].dst), &(edgelist->edges[nnzcnt].val)))
{
std::cout << "Bad edge read, i, j, nnzcnt " << i << " , " << j << " , " << nnzcnt << std::endl;
exit(0);
}
if(edgelist->edges[nnzcnt].src <= 0 ||
edgelist->edges[nnzcnt].dst <= 0 ||
edgelist->edges[nnzcnt].src > edgelist->m ||
edgelist->edges[nnzcnt].dst > edgelist->n)
{
std::cout << "Invalid edge, i, j, nnz: " << i << " , " << j << " , " << nnzcnt << std::endl;
exit(0);
}
nnzcnt++;
}
fclose(fp);
}
}
template <typename T>
void randomize_edgelist_square(edgelist_t<T>* edgelist, int nrank) {
unsigned int* mapping = new unsigned int[edgelist->m];
unsigned int* rval = new unsigned int[edgelist->m];
if (global_myrank == 0) {
srand(5);
// #pragma omp parallel for
for (int i = 0; i < edgelist->m; i++) {
mapping[i] = i;
rval[i] = rand() % edgelist->m;
}
for (int i = 0; i < edgelist->m; i++) {
unsigned int tmp = mapping[i];
mapping[i] = mapping[rval[i]];
mapping[rval[i]] = tmp;
}
}
delete[] rval;
MPI_Bcast(mapping, edgelist->m, MPI_INT, 0, MPI_COMM_WORLD);
#pragma omp parallel for
for (int i = 0; i < edgelist->nnz; i++) {
edgelist->edges[i].src = mapping[edgelist->edges[i].src - 1] + 1;
edgelist->edges[i].dst = mapping[edgelist->edges[i].dst - 1] + 1;
}
delete[] mapping;
}
template<typename T>
void remove_empty_columns(edgelist_t<T> * edges, int ** remaining_indices)
{
// Remove empty columns
bool * colexists = new bool[edges->n];
memset(colexists, 0, edges->n * sizeof(bool));
int * new_colids = new int[edges->n+1];
memset(new_colids, 0, (edges->n + 1) * sizeof(int));
int new_ncols = 0;
for(int i = 0 ; i < edges->nnz ; i++)
{
if(!colexists[edges->edges[i].dst-1])
{
new_ncols++;
}
colexists[edges->edges[i].dst-1] = true;
}
std::cout << "New ncols: " << new_ncols << std::endl;
*(remaining_indices) = (int*) _mm_malloc(new_ncols * sizeof(int), 64);
int new_colcnt = 0;
for(int i = 0 ; i < edges->n; i++)
{
new_colids[i+1] = (colexists[i] ? 1 : 0) + new_colids[i];
if(colexists[i])
{
assert(new_colcnt < new_ncols);
(*(remaining_indices))[new_colcnt] = i+1;
new_colcnt++;
}
}
assert(new_colcnt == new_ncols);
#pragma omp parallel for
for(int i = 0 ; i < edges->nnz ; i++)
{
edges->edges[i].dst = new_colids[edges->edges[i].dst-1] + 1;
assert(edges->edges[i].dst - 1 >= 0);
assert(edges->edges[i].dst - 1 < new_ncols);
}
edges->n = new_ncols;
delete [] colexists;
delete [] new_colids;
}
template<typename T>
void filter_edges_by_row(edgelist_t<T> * edges, int start_row, int end_row)
{
int valid_edgecnt = 0;
for(int i = 0 ; i < edges->nnz ; i++)
{
if(edges->edges[i].src-1 < end_row &&
edges->edges[i].src-1 >= start_row)
{
edges->edges[valid_edgecnt] = edges->edges[i];
edges->edges[valid_edgecnt].src -= start_row;
valid_edgecnt++;
}
}
edges->nnz = valid_edgecnt;
edges->m = (end_row-start_row);
std::cout << "New edges->m: " << edges->m << std::endl;
}
#endif // SRC_EDGELIST_H_
|
ex6-matrix-max-openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define VALIDATE 0
#if VALIDATE
#include "validate.h"
#endif
int max(const size_t n, const int * restrict);
void usage(char**);
int main(int argc, char **argv)
{
int *A,max_val;
size_t i,j,n;
double t0,t1;
if(argc==2)
sscanf(argv[1],"%zu",&n);
else {
usage(argv);
return 1;
}
srand(42); // The Answer
A = (int*)malloc(n*n*sizeof(int));
for(i=0; i<n; ++i)
for(j=0; j<n; ++j)
A[i*n+j]=rand();
t0 = omp_get_wtime();
max_val = max(n,A);
t1 = omp_get_wtime();
#if VALIDATE
if(!validate_max(n,A,max_val)) {
printf("Validation failed.\n");
return 1;
}
#endif
printf("max(A) = %d\n",max_val);
printf("Total time taken: %f.\n",t1-t0);
free(A);
return 0;
}
int max(const size_t n, const int * restrict A)
{
int max_val=A[0];
size_t i,j;
#pragma omp parallel for default(none) shared(max_val,A) private(i,j)
for(i=0; i<n; ++i)
for(j=0; j<n; ++j)
if(A[i*n+j]>max_val)
#pragma omp critical
{
max_val=A[i*n+j];
}
return max_val;
}
void usage(char **argv)
{
printf("Usage: %s <length>\n",argv[0]);
}
|
numeros.c | // Compilar usando: gcc-9 -fopenmp numeros.c por la librería omp
#include <stdio.h>
#include <omp.h>
int main(int argc, char const *argv[])
{
int n = 1000000;
int suma = 0;
int i;
int suma_local = 0;
int numeros[n];
for (i = 0; i < n; i++)
{
numeros[i] = 1;
}
#pragma omp parallel private(i)
{
int numH = omp_get_num_threads();
int tid = omp_get_thread_num();
for (i = 0; i < n; i++)
#pragma omp critical
{
suma += numeros[i];
}
printf("Soy el hilo %d de %d\n", tid + 1, numH);
}
printf("La suma es %d\n", suma);
return 0;
}
|
bezier_classical_post_utility.h | //
// Project Name: Kratos
// Last Modified by: $Author: hbui $
// Date: $Date: 2013-10-12 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED )
#define KRATOS_BEZIER_CLASSICAL_POST_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <vector>
#include <iostream>
// External includes
#include <omp.h>
#include "boost/progress.hpp"
#ifdef ISOGEOMETRIC_USE_MPI
#include "mpi.h"
#endif
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/properties.h"
#include "includes/ublas_interface.h"
#include "includes/deprecated_variables.h"
#include "includes/legacy_structural_app_vars.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"
#include "utilities/auto_collapse_spatial_binning.h"
#include "custom_utilities/iga_define.h"
#include "custom_geometries/isogeometric_geometry.h"
#include "custom_utilities/isogeometric_utility.h"
#include "custom_utilities/isogeometric_post_utility.h"
#include "isogeometric_application/isogeometric_application.h"
//#define DEBUG_LEVEL1
//#define DEBUG_LEVEL2
//#define DEBUG_MULTISOLVE
//#define DEBUG_GENERATE_MESH
#define ENABLE_PROFILING
namespace Kratos
{
///@addtogroup IsogeometricApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
template<class T> void AddToModelPart(ModelPart& rModelPart, typename T::Pointer pE);
template<> void AddToModelPart<Element>(ModelPart& rModelPart, typename Element::Pointer pE)
{
rModelPart.AddElement(pE);
}
template<> void AddToModelPart<Condition>(ModelPart& rModelPart, typename Condition::Pointer pC)
{
rModelPart.AddCondition(pC);
}
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/**
A simple utility to export directly the FEM mesh out from isogeometric Bezier mesh. Each Bezier element will generate its own set of FEM elements. Therefore a large amount of nodes and elements may be generated.
One shall carefully use this utility for large problem. Previously, this class is named IsogeometricClassicalPostUtility.
*/
class BezierClassicalPostUtility : public IsogeometricPostUtility
{
public:
///@name Type Definitions
///@{
typedef boost::numeric::ublas::vector<double> ValuesContainerType;
typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;
typedef typename ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::ElementsContainerType ElementsArrayType;
typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::PointType NodeType;
typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
typedef typename NodeType::DofsContainerType DofsContainerType;
typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
typedef std::size_t IndexType;
/// Pointer definition of BezierClassicalPostUtility
KRATOS_CLASS_POINTER_DEFINITION(BezierClassicalPostUtility);
///@}
///@name Life Cycle
///@{
/// Default constructor.
BezierClassicalPostUtility(ModelPart::Pointer pModelPart)
: mpModelPart(pModelPart)
{
}
/// Destructor.
virtual ~BezierClassicalPostUtility()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Generate the post model_part from reference model_part
/// Deprecated
void GenerateModelPart(ModelPart::Pointer pModelPartPost, PostElementType postElementType)
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
#endif
#ifdef DEBUG_LEVEL1
std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif
ElementsArrayType& pElements = mpModelPart->Elements();
#ifdef DEBUG_LEVEL1
std::cout << "Retrieved pElements" << std::endl;
#endif
std::string NodeKey = std::string("Node");
//select the correct post element type
std::string element_name;
if(postElementType == _TRIANGLE_)
element_name = std::string("KinematicLinear2D3N");
else if(postElementType == _QUADRILATERAL_)
element_name = std::string("KinematicLinear2D4N");
else if(postElementType == _TETRAHEDRA_)
element_name = std::string("KinematicLinear3D4N");
else if(postElementType == _HEXAHEDRA_)
element_name = std::string("KinematicLinear3D8N");
else
KRATOS_THROW_ERROR(std::logic_error, "This element type is not supported for isogeometric post-processing", __FUNCTION__);
if(!KratosComponents<Element>::Has(element_name))
{
std::stringstream buffer;
buffer << "Element " << element_name << " is not registered in Kratos.";
buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
}
Element const& rCloneElement = KratosComponents<Element>::Get(element_name);
IndexType NodeCounter = 0;
IndexType ElementCounter = 0;
boost::progress_display show_progress( pElements.size() );
for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
if((*it)->GetValue( IS_INACTIVE ))
{
// std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
continue;
}
int Dim = (*it)->GetGeometry().WorkingSpaceDimension();
IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(Dim)
#endif
//get the properties
Properties::Pointer pDummyProperties = (*it)->pGetProperties();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(*pDummyProperties)
#endif
// generate list of nodes
if(Dim == 1)
{
// TODO
}
else if(Dim == 2)
{
IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) );
IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) );
IndexType i, j;
CoordinatesArrayType p_ref;
CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(NumDivision1)
KRATOS_WATCH(NumDivision2)
std::cout << "Generating Nodes..." << std::endl;
#endif
// create and add nodes
p_ref[2] = 0.0;
for(i = 0; i <= NumDivision1; ++i)
{
p_ref[0] = ((double) i) / NumDivision1;
for(j = 0; j <= NumDivision2; ++j)
{
p_ref[1] = ((double) j) / NumDivision2;
p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref);
NodeType::Pointer pNewNode( new NodeType( 0, p ) );
pNewNode->SetId(++NodeCounter);
#ifdef DEBUG_GENERATE_MESH
// if(NodeCounter == 585 || NodeCounter == 588 || NodeCounter == 589)
if(NodeCounter)
{
std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
}
#endif
// Giving model part's variables list to the node
pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList());
//set buffer size
pNewNode->SetBufferSize(pModelPartPost->GetBufferSize());
pModelPartPost->AddNode(pNewNode);
mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
mNodeToElement(pNewNode->Id()) = (*it)->Id();
}
}
//for correct mapping to element, the repetitive node is allowed.
// pModelPartPost->Nodes().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(pModelPartPost->Nodes().size())
std::cout << "Generating Elements..." << std::endl;
#endif
// create and add element
// Element::NodesArrayType temp_element_nodes;
// for(i = 0; i < NumDivision1; ++i)
// {
// for(j = 0; j < NumDivision2; ++j)
// {
// int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
// int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
// int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
// int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
// if(postElementType == _TRIANGLE_)
// {
// // TODO: check if jacobian checking is necessary
// temp_element_nodes.clear();
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base()));
// Element::Pointer NewElement1 = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties);
// pModelPartPost->AddElement(NewElement1);
// mOldToNewElements[(*it)->Id()].insert(ElementCounter);
// temp_element_nodes.clear();
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base()));
// Element::Pointer NewElement2 = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties);
// pModelPartPost->AddElement(NewElement2);
// mOldToNewElements[(*it)->Id()].insert(ElementCounter);
// }
// else if(postElementType == _QUADRILATERAL_)
// {
// // TODO: check if jacobian checking is necessary
// temp_element_nodes.clear();
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base()));
// Element::Pointer NewElement = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties);
// pModelPartPost->AddElement(NewElement);
// mOldToNewElements[(*it)->Id()].insert(ElementCounter);
// }
// }
// }
std::vector<std::vector<IndexType> > connectivities;
for(i = 0; i < NumDivision1; ++i)
{
for(j = 0; j < NumDivision2; ++j)
{
IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
if(postElementType == _TRIANGLE_)
{
connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4});
connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3});
}
else if(postElementType == _QUADRILATERAL_)
{
connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
}
}
}
ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>(
connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey);
for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2)
{
pModelPartPost->AddElement(*it2);
mOldToNewElements[(*it)->Id()].insert((*it2)->Id());
}
pModelPartPost->Elements().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(pModelPartPost->Elements().size())
#endif
}
else if(Dim == 3)
{
IndexType NumDivision1 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_1) );
IndexType NumDivision2 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_2) );
IndexType NumDivision3 = static_cast<IndexType>( (*it)->GetValue(NUM_DIVISION_3) );
IndexType i, j, k;
CoordinatesArrayType p_ref;
CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH((*it)->Id())
KRATOS_WATCH(NumDivision1)
KRATOS_WATCH(NumDivision2)
KRATOS_WATCH(NumDivision3)
std::cout << "Generating Nodes..." << std::endl;
#endif
// create and add nodes
for(i = 0; i <= NumDivision1; ++i)
{
p_ref[0] = ((double) i) / NumDivision1;
for(j = 0; j <= NumDivision2; ++j)
{
p_ref[1] = ((double) j) / NumDivision2;
for(k = 0; k <= NumDivision3; ++k)
{
p_ref[2] = ((double) k) / NumDivision3;
p = GlobalCoordinates((*it)->GetGeometry(), p, p_ref);
NodeType::Pointer pNewNode( new NodeType( 0, p ) );
pNewNode->SetId(++NodeCounter);
#ifdef DEBUG_GENERATE_MESH
if(NodeCounter)
{
std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
}
#endif
// Giving model part's variables list to the node
pNewNode->SetSolutionStepVariablesList(&pModelPartPost->GetNodalSolutionStepVariablesList());
//set buffer size
pNewNode->SetBufferSize(pModelPartPost->GetBufferSize());
pModelPartPost->AddNode(pNewNode);
mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
mNodeToElement(pNewNode->Id()) = (*it)->Id();
}
}
}
//for correct mapping to element, the repetitive node is allowed.
// pModelPartPost->Nodes().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(pModelPartPost->Nodes().size())
std::cout << "Generating Elements..." << std::endl;
#endif
// create and add element
// Element::NodesArrayType temp_element_nodes;
// for(i = 0; i < NumDivision1; ++i)
// {
// for(j = 0; j < NumDivision2; ++j)
// {
// for(k = 0; k < NumDivision3; ++k)
// {
// int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
// int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
// int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
// int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
// int Node5 = Node1 + 1;
// int Node6 = Node2 + 1;
// int Node7 = Node3 + 1;
// int Node8 = Node4 + 1;
// if(postElementType == _TETRAHEDRA_)
// {
// // TODO: check if jacobian checking is necessary
// }
// else if(postElementType == _HEXAHEDRA_)
// {
// // TODO: check if jacobian checking is necessary
// temp_element_nodes.clear();
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node1, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node2, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node4, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node3, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node5, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node6, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node8, NodeKey).base()));
// temp_element_nodes.push_back(*(FindKey(pModelPartPost->Nodes(), Node7, NodeKey).base()));
// Element::Pointer NewElement = rCloneElement.Create(++ElementCounter, temp_element_nodes, pDummyProperties);
// pModelPartPost->AddElement(NewElement);
// mOldToNewElements[(*it)->Id()].insert(ElementCounter);
// }
// }
// }
// }
std::vector<std::vector<IndexType> > connectivities;
for(i = 0; i < NumDivision1; ++i)
{
for(j = 0; j < NumDivision2; ++j)
{
for(k = 0; k < NumDivision3; ++k)
{
IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
IndexType Node5 = Node1 + 1;
IndexType Node6 = Node2 + 1;
IndexType Node7 = Node3 + 1;
IndexType Node8 = Node4 + 1;
if(postElementType == _TETRAHEDRA_)
{
// TODO: check if creating Tetrahedra is correct
connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4});
connectivities.push_back(std::vector<IndexType>{Node1, Node4, Node3});
}
else if(postElementType == _HEXAHEDRA_)
{
connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
}
}
}
}
ElementsArrayType pNewElements = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, Element, ElementsArrayType>(
connectivities, *pModelPartPost, rCloneElement, ElementCounter, pDummyProperties, NodeKey);
for (typename ElementsArrayType::ptr_iterator it2 = pNewElements.ptr_begin(); it2 != pNewElements.ptr_end(); ++it2)
{
pModelPartPost->AddElement(*it2);
mOldToNewElements[(*it)->Id()].insert((*it2)->Id());
}
pModelPartPost->Elements().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(pModelPartPost->Elements().size())
#endif
}
++show_progress;
}
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "GeneratePostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
std::cout << "GeneratePostModelPart completed" << std::endl;
#endif
std::cout << NodeCounter << " nodes and " << ElementCounter << " elements are created" << std::endl;
}
/// Generate the post model_part from reference model_part
/// this is the improved version of GenerateModelPart
/// which uses template function to generate post Elements for both Element and Condition
void GenerateModelPart2(ModelPart::Pointer pModelPartPost, const bool& generate_for_condition)
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
#endif
#ifdef DEBUG_LEVEL1
std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif
ElementsArrayType& pElements = mpModelPart->Elements();
ConditionsArrayType& pConditions = mpModelPart->Conditions();
std::string NodeKey = std::string("Node");
IndexType NodeCounter = 0;
IndexType ElementCounter = 0;
boost::progress_display show_progress( pElements.size() );
for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
// This is wrong, we will not skill the IS_INACTIVE elements
// TODO: to be deleted
// if((*it)->GetValue( IS_INACTIVE ))
// {
//// std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
// ++show_progress;
// continue;
// }
if((*it)->pGetGeometry() == 0)
KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at element", (*it)->Id())
int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(Dim)
KRATOS_WATCH(ReducedDim)
#endif
//select the correct post element type
std::string element_name;
if((Dim == 2) && (ReducedDim == 2))
{
element_name = std::string("KinematicLinear2D4N");
}
else if((Dim == 3) && (ReducedDim == 2))
{
element_name = std::string("KinematicLinear2D4N");
}
else if((Dim == 3) && (ReducedDim == 3))
{
element_name = std::string("KinematicLinear3D8N");
}
else
{
std::stringstream ss;
ss << "Invalid dimension of ";
ss << typeid(*(*it)).name();
ss << ", Dim = " << Dim;
ss << ", ReducedDim = " << ReducedDim;
KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
}
if(!KratosComponents<Element>::Has(element_name))
{
std::stringstream buffer;
buffer << "Element " << element_name << " is not registered in Kratos.";
buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
}
Element const& rCloneElement = KratosComponents<Element>::Get(element_name);
GenerateForOneEntity<Element, ElementsArrayType, 1>(*pModelPartPost,
*(*it), rCloneElement, NodeCounter_old, NodeCounter, ElementCounter, NodeKey);
++show_progress;
}
KRATOS_WATCH(ElementCounter)
#ifdef DEBUG_LEVEL1
std::cout << "Done generating for elements" << std::endl;
#endif
IndexType ConditionCounter = 0;
if (generate_for_condition)
{
boost::progress_display show_progress2( pConditions.size() );
for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
{
// This is wrong, we will not kill the IS_INACTIVE conditions
// TODO: to be deleted
// if((*it)->GetValue( IS_INACTIVE ))
// {
//// std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl;
// ++show_progress2;
// continue;
// }
if((*it)->pGetGeometry() == 0)
KRATOS_THROW_ERROR(std::logic_error, "Error: geometry is NULL at condition", (*it)->Id())
int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
KRATOS_WATCH(Dim)
KRATOS_WATCH(ReducedDim)
#endif
//select the correct post condition type
std::string condition_name;
if(Dim == 3 && ReducedDim == 1)
condition_name = std::string("LineForce3D2N");
else if(Dim == 3 && ReducedDim == 2)
condition_name = std::string("FaceForce3D4N");
else
{
std::stringstream ss;
ss << "Invalid dimension of ";
ss << typeid(*(*it)).name();
ss << ", Dim = " << Dim;
ss << ", ReducedDim = " << ReducedDim;
ss << ". Condition " << (*it)->Id() << " will be skipped.";
// KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
continue;
}
if(!KratosComponents<Condition>::Has(condition_name))
{
std::stringstream buffer;
buffer << "Condition " << condition_name << " is not registered in Kratos.";
buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
}
Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);
GenerateForOneEntity<Condition, ConditionsArrayType, 2>(*pModelPartPost,
*(*it), rCloneCondition, NodeCounter_old, NodeCounter, ConditionCounter, NodeKey);
++show_progress2;
}
KRATOS_WATCH(ConditionCounter)
}
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "GeneratePostModelPart2 completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
std::cout << "GeneratePostModelPart2 completed" << std::endl;
#endif
std::cout << NodeCounter << " nodes and " << ElementCounter << " elements";
if (generate_for_condition)
std::cout << ", " << ConditionCounter << " conditions";
std::cout << " are created" << std::endl;
}
// Generate the post model_part from reference model_part
// this is the improved version of GenerateModelPart
// which uses template function to generate post Elements for both Element and Condition
// this version used a collapsing utility to collapse nodes automatically
void GenerateModelPart2AutoCollapse(ModelPart::Pointer pModelPartPost,
double dx, double dy, double dz, double tol)
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
#endif
#ifdef DEBUG_LEVEL1
std::cout << typeid(*this).name() << "::GenerateModelPart" << std::endl;
#endif
AutoCollapseSpatialBinning collapse_util(0.0, 0.0, 0.0, dx, dy, dz, tol);
ElementsArrayType& pElements = mpModelPart->Elements();
ConditionsArrayType& pConditions = mpModelPart->Conditions();
std::string NodeKey = std::string("Node");
IndexType NodeCounter = 0;
IndexType ElementCounter = 0;
boost::progress_display show_progress( pElements.size() );
VectorMap<IndexType, IndexType> MapToCollapseNode;
for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
if((*it)->GetValue( IS_INACTIVE ))
{
// std::cout << "Element " << (*it)->Id() << " is inactive" << std::endl;
++show_progress;
continue;
}
int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(Dim)
KRATOS_WATCH(ReducedDim)
#endif
//select the correct post element type
std::string element_name;
if(Dim == 2 && ReducedDim == 2)
element_name = std::string("KinematicLinear2D4N");
else if(Dim == 3 && ReducedDim == 3)
element_name = std::string("KinematicLinear3D8N");
else
{
std::stringstream ss;
ss << "Invalid dimension of ";
ss << typeid(*(*it)).name();
ss << ", Dim = " << Dim;
ss << ", ReducedDim = " << ReducedDim;
KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
}
if(!KratosComponents<Element>::Has(element_name))
{
std::stringstream buffer;
buffer << "Element " << element_name << " is not registered in Kratos.";
buffer << " Please check the spelling of the element name and see if the application which containing it, is registered corectly.";
KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
}
Element const& rCloneElement = KratosComponents<Element>::Get(element_name);
GenerateForOneEntityAutoCollapse<Element, ElementsArrayType, 1>(collapse_util,
*pModelPartPost, *(*it), rCloneElement, MapToCollapseNode, NodeCounter_old,
NodeCounter, ElementCounter, NodeKey);
++show_progress;
}
#ifdef DEBUG_LEVEL1
std::cout << "Done generating for elements" << std::endl;
#endif
IndexType ConditionCounter = 0;
boost::progress_display show_progress2( pConditions.size() );
for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
{
if((*it)->GetValue( IS_INACTIVE ))
{
// std::cout << "Condition " << (*it)->Id() << " is inactive" << std::endl;
++show_progress2;
continue;
}
int Dim = (*it)->GetGeometry().WorkingSpaceDimension(); // global dimension of the geometry that it works on
int ReducedDim = (*it)->GetGeometry().Dimension(); // reduced dimension of the geometry
IndexType NodeCounter_old = NodeCounter;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(typeid((*it)->GetGeometry()).name())
KRATOS_WATCH(Dim)
KRATOS_WATCH(ReducedDim)
#endif
//select the correct post condition type
std::string condition_name;
if(Dim == 3 && ReducedDim == 1)
condition_name = std::string("LineForce3D2N");
else if(Dim == 3 && ReducedDim == 2)
condition_name = std::string("FaceForce3D4N");
else
{
std::stringstream ss;
ss << "Invalid dimension of ";
ss << typeid(*(*it)).name();
ss << ", Dim = " << Dim;
ss << ", ReducedDim = " << ReducedDim;
KRATOS_THROW_ERROR(std::logic_error, ss.str(), __FUNCTION__);
}
if(!KratosComponents<Condition>::Has(condition_name))
{
std::stringstream buffer;
buffer << "Condition " << condition_name << " is not registered in Kratos.";
buffer << " Please check the spelling of the condition name and see if the application which containing it, is registered corectly.";
KRATOS_THROW_ERROR(std::runtime_error, buffer.str(), "");
}
Condition const& rCloneCondition = KratosComponents<Condition>::Get(condition_name);
GenerateForOneEntityAutoCollapse<Condition, ConditionsArrayType, 2>(collapse_util,
*pModelPartPost, *(*it), rCloneCondition, MapToCollapseNode, NodeCounter_old,
NodeCounter, ConditionCounter, NodeKey);
++show_progress2;
}
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Generate PostModelPart completed: " << (end_compute - start_compute) << " s" << std::endl;
#else
std::cout << "Generate PostModelPart completed" << std::endl;
#endif
std::cout << NodeCounter << " nodes and " << ElementCounter << " elements" << ", " << ConditionCounter << " conditions are created" << std::endl;
}
/**
* Utility function to generate elements/conditions for element/condition
* if TEntityType==Element, type must be 1; if T==Condition, type is 2
*/
template<class TEntityType, class TEntityContainerType, std::size_t type>
void GenerateForOneEntity(ModelPart& rModelPart,
TEntityType& rE,
TEntityType const& rSample,
IndexType NodeCounter_old,
IndexType& NodeCounter,
IndexType& EntityCounter,
const std::string& NodeKey)
{
// int ReducedDim = rE.GetGeometry().WorkingSpaceDimension();
int ReducedDim = rE.GetGeometry().Dimension();
//get the properties
Properties::Pointer pDummyProperties = rE.pGetProperties();
#ifdef DEBUG_LEVEL1
if(type == 1)
std::cout << "Generating for element " << rE.Id() << std::endl;
else
std::cout << "Generating for condition " << rE.Id() << std::endl;
KRATOS_WATCH(*pDummyProperties)
KRATOS_WATCH(EntityCounter)
#endif
// generate list of nodes
if(ReducedDim == 1)
{
// TODO
}
else if(ReducedDim == 2)
{
IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
IndexType i, j;
CoordinatesArrayType p_ref;
CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(NumDivision1)
KRATOS_WATCH(NumDivision2)
std::cout << "Generating Nodes..." << std::endl;
#endif
// create and add nodes
p_ref[2] = 0.0;
for(i = 0; i <= NumDivision1; ++i)
{
p_ref[0] = ((double) i) / NumDivision1;
for(j = 0; j <= NumDivision2; ++j)
{
p_ref[1] = ((double) j) / NumDivision2;
p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
NodeType::Pointer pNewNode( new NodeType( 0, p ) );
pNewNode->SetId(++NodeCounter);
// Giving model part's variables list to the node
pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
//set buffer size
pNewNode->SetBufferSize(rModelPart.GetBufferSize());
rModelPart.AddNode(pNewNode);
if(type == 1)
{
mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
mNodeToElement(pNewNode->Id()) = rE.Id();
}
}
}
//for correct mapping to element, the repetitive node is allowed.
// rModelPart.Nodes().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(rModelPart.Nodes().size())
if(type == 1)
std::cout << "Generating Elements..." << std::endl;
else
std::cout << "Generating Conditions..." << std::endl;
#endif
// create and add element
// typename T::NodesArrayType temp_nodes;
// for(i = 0; i < NumDivision1; ++i)
// {
// for(j = 0; j < NumDivision2; ++j)
// {
// int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
// int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
// int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
// int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
// // TODO: check if jacobian checking is necessary
// temp_nodes.clear();
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base()));
// int NewEntityId = ++EntityCounter;
// // int NewEntityId = rE.Id(); ++EntityCounter;
// typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties);
// AddToModelPart<TEntityType>(rModelPart, NewEntity);
// if(type == 1)
// mOldToNewElements[rE.Id()].insert(NewEntityId);
// else if(type == 2)
// mOldToNewConditions[rE.Id()].insert(NewEntityId);
// }
// }
std::vector<std::vector<IndexType> > connectivities;
for(i = 0; i < NumDivision1; ++i)
{
for(j = 0; j < NumDivision2; ++j)
{
IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3});
}
}
TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);
for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
{
AddToModelPart<TEntityType>(rModelPart, *it2);
if(type == 1)
mOldToNewElements[rE.Id()].insert((*it2)->Id());
else if(type == 2)
mOldToNewConditions[rE.Id()].insert((*it2)->Id());
}
if(type == 1)
rModelPart.Elements().Unique();
else if(type == 2)
rModelPart.Conditions().Unique();
#ifdef DEBUG_LEVEL1
if(type == 1)
KRATOS_WATCH(rModelPart.Elements().size())
else
KRATOS_WATCH(rModelPart.Conditions().size())
#endif
}
else if(ReducedDim == 3)
{
IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) );
IndexType i, j, k;
CoordinatesArrayType p_ref;
CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(rE.Id())
KRATOS_WATCH(NumDivision1)
KRATOS_WATCH(NumDivision2)
KRATOS_WATCH(NumDivision3)
std::cout << "Generating Nodes..." << std::endl;
#endif
// create and add nodes
for(i = 0; i <= NumDivision1; ++i)
{
p_ref[0] = ((double) i) / NumDivision1;
for(j = 0; j <= NumDivision2; ++j)
{
p_ref[1] = ((double) j) / NumDivision2;
for(k = 0; k <= NumDivision3; ++k)
{
p_ref[2] = ((double) k) / NumDivision3;
p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
NodeType::Pointer pNewNode( new NodeType( 0, p ) );
pNewNode->SetId(++NodeCounter);
#ifdef DEBUG_GENERATE_MESH
if(NodeCounter)
{
std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
}
#endif
// Giving model part's variables list to the node
pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
//set buffer size
pNewNode->SetBufferSize(rModelPart.GetBufferSize());
rModelPart.AddNode(pNewNode);
if(type == 1)
{
mNodeToLocalCoordinates(pNewNode->Id()) = p_ref;
mNodeToElement(pNewNode->Id()) = rE.Id();
}
}
}
}
//for correct mapping to element, the repetitive node is allowed.
// rModelPart.Nodes().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(rModelPart.Nodes().size())
if(type == 1)
std::cout << "Generating Elements..." << std::endl;
else
std::cout << "Generating Conditions..." << std::endl;
#endif
// create and add element
// typename T::NodesArrayType temp_nodes;
// for(i = 0; i < NumDivision1; ++i)
// {
// for(j = 0; j < NumDivision2; ++j)
// {
// for(k = 0; k < NumDivision3; ++k)
// {
// int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
// int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
// int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
// int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
// int Node5 = Node1 + 1;
// int Node6 = Node2 + 1;
// int Node7 = Node3 + 1;
// int Node8 = Node4 + 1;
// // TODO: check if jacobian checking is necessary
// temp_nodes.clear();
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node1, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node2, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node4, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node3, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node5, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node6, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node8, NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), Node7, NodeKey).base()));
// int NewEntityId = ++EntityCounter;
// typename TEntityType::Pointer NewEntity = rSample.Create(NewEntityId, temp_nodes, pDummyProperties);
// AddToModelPart<TEntityType>(rModelPart, NewEntity);
// if(type == 1)
// mOldToNewElements[rE.Id()].insert(NewEntityId);
// else if(type == 2)
// mOldToNewConditions[rE.Id()].insert(NewEntityId);
// }
// }
// }
std::vector<std::vector<IndexType> > connectivities;
for(i = 0; i < NumDivision1; ++i)
{
for(j = 0; j < NumDivision2; ++j)
{
for(k = 0; k < NumDivision3; ++k)
{
IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
IndexType Node5 = Node1 + 1;
IndexType Node6 = Node2 + 1;
IndexType Node7 = Node3 + 1;
IndexType Node8 = Node4 + 1;
connectivities.push_back(std::vector<IndexType>{Node1, Node2, Node4, Node3, Node5, Node6, Node8, Node7});
}
}
}
TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);
for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
{
AddToModelPart<TEntityType>(rModelPart, *it2);
if(type == 1)
mOldToNewElements[rE.Id()].insert((*it2)->Id());
else if(type == 2)
mOldToNewConditions[rE.Id()].insert((*it2)->Id());
}
if(type == 1)
rModelPart.Elements().Unique();
else if(type == 2)
rModelPart.Conditions().Unique();
#ifdef DEBUG_LEVEL1
if(type == 1)
KRATOS_WATCH(rModelPart.Elements().size())
else
KRATOS_WATCH(rModelPart.Conditions().size())
#endif
}
}
/**
* Utility function to generate elements/conditions for element/condition.
* This uses a collapse utility to automatically merge the coincident nodes
* if T==Element, type must be 1; otherwise type=2
*/
template<class TEntityType, class TEntityContainerType, std::size_t type>
void GenerateForOneEntityAutoCollapse(AutoCollapseSpatialBinning& collapse_util,
ModelPart& rModelPart,
TEntityType& rE,
TEntityType const& rSample,
VectorMap<IndexType, IndexType>& rMapToCollapseNode,
IndexType NodeCounter_old,
IndexType& NodeCounter,
IndexType& EntityCounter,
const std::string& NodeKey)
{
// int ReducedDim = rE.GetGeometry().WorkingSpaceDimension();
int ReducedDim = rE.GetGeometry().Dimension();
//get the properties
Properties::Pointer pDummyProperties = rE.pGetProperties();
#ifdef DEBUG_LEVEL1
if(type == 1)
std::cout << "Generating for element " << rE.Id() << std::endl;
else
std::cout << "Generating for condition " << rE.Id() << std::endl;
KRATOS_WATCH(*pDummyProperties)
#endif
// generate list of nodes
if(ReducedDim == 1)
{
// TODO
}
else if(ReducedDim == 2)
{
IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
IndexType i, j;
CoordinatesArrayType p_ref;
CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(NumDivision1)
KRATOS_WATCH(NumDivision2)
std::cout << "Generating Nodes..." << std::endl;
#endif
// create and add nodes
p_ref[2] = 0.0;
for(i = 0; i <= NumDivision1; ++i)
{
p_ref[0] = ((double) i) / NumDivision1;
for(j = 0; j <= NumDivision2; ++j)
{
p_ref[1] = ((double) j) / NumDivision2;
p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) );
++NodeCounter;
rMapToCollapseNode[NodeCounter] = id;
if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end())
{
// this is a new node
NodeType::Pointer pNewNode( new NodeType( 0, p ) );
pNewNode->SetId(id);
// Giving model part's variables list to the node
pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
//set buffer size
pNewNode->SetBufferSize(rModelPart.GetBufferSize());
rModelPart.AddNode(pNewNode);
}
else
{
// this is an old node, not required to add to model_part
// so do nothing
}
// in this way, the node will always point to the last local coodinates and element
if(type == 1)
{
mNodeToLocalCoordinates(id) = p_ref;
mNodeToElement(id) = rE.Id();
}
}
}
//for correct mapping to element, the repetitive node is allowed.
// rModelPart.Nodes().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(rModelPart.Nodes().size())
if(type == 1)
std::cout << "Generating Elements..." << std::endl;
else
std::cout << "Generating Conditions..." << std::endl;
#endif
// create and add element
// typename T::NodesArrayType temp_nodes;
// for(i = 0; i < NumDivision1; ++i)
// {
// for(j = 0; j < NumDivision2; ++j)
// {
// int Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
// int Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
// int Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
// int Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
// // TODO: check if jacobian checking is necessary
// temp_nodes.clear();
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base()));
// typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties);
// AddToModelPart<T>(rModelPart, NewEntity);
// if(type == 1)
// mOldToNewElements[rE.Id()].insert(EntityCounter);
// else if(type == 2)
// mOldToNewConditions[rE.Id()].insert(EntityCounter);
// }
// }
std::vector<std::vector<IndexType> > connectivities;
for(i = 0; i < NumDivision1; ++i)
{
for(j = 0; j < NumDivision2; ++j)
{
IndexType Node1 = NodeCounter_old + i * (NumDivision2 + 1) + j + 1;
IndexType Node2 = NodeCounter_old + i * (NumDivision2 + 1) + j + 2;
IndexType Node3 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 1;
IndexType Node4 = NodeCounter_old + (i + 1) * (NumDivision2 + 1) + j + 2;
connectivities.push_back(std::vector<IndexType>{
rMapToCollapseNode[Node1],
rMapToCollapseNode[Node2],
rMapToCollapseNode[Node4],
rMapToCollapseNode[Node3]});
}
}
TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);
for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
{
AddToModelPart<TEntityType>(rModelPart, *it2);
if(type == 1)
mOldToNewElements[rE.Id()].insert((*it2)->Id());
else if(type == 2)
mOldToNewConditions[rE.Id()].insert((*it2)->Id());
}
if(type == 1)
rModelPart.Elements().Unique();
else if(type == 2)
rModelPart.Conditions().Unique();
#ifdef DEBUG_LEVEL1
if(type == 1)
KRATOS_WATCH(rModelPart.Elements().size())
else
KRATOS_WATCH(rModelPart.Conditions().size())
#endif
}
else if(ReducedDim == 3)
{
IndexType NumDivision1 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_1) );
IndexType NumDivision2 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_2) );
IndexType NumDivision3 = static_cast<IndexType>( rE.GetValue(NUM_DIVISION_3) );
IndexType i, j, k;
CoordinatesArrayType p_ref;
CoordinatesArrayType p;
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(rE.Id())
KRATOS_WATCH(NumDivision1)
KRATOS_WATCH(NumDivision2)
KRATOS_WATCH(NumDivision3)
std::cout << "Generating Nodes..." << std::endl;
#endif
// create and add nodes
for(i = 0; i <= NumDivision1; ++i)
{
p_ref[0] = ((double) i) / NumDivision1;
for(j = 0; j <= NumDivision2; ++j)
{
p_ref[1] = ((double) j) / NumDivision2;
for(k = 0; k <= NumDivision3; ++k)
{
p_ref[2] = ((double) k) / NumDivision3;
p = GlobalCoordinates(rE.GetGeometry(), p, p_ref);
IndexType id = static_cast<IndexType>( collapse_util.AddNode(p[0], p[1], p[2]) );
++NodeCounter;
rMapToCollapseNode[NodeCounter] = id;
if(rModelPart.Nodes().find(id) == rModelPart.Nodes().end())
{
// this is a new node
NodeType::Pointer pNewNode( new NodeType( 0, p ) );
pNewNode->SetId(id);
#ifdef DEBUG_GENERATE_MESH
if(NodeCounter)
{
std::cout << "Node " << NodeCounter << " p_ref: " << p_ref << ", p: " << p << std::endl;
}
#endif
// Giving model part's variables list to the node
pNewNode->SetSolutionStepVariablesList(&rModelPart.GetNodalSolutionStepVariablesList());
//set buffer size
pNewNode->SetBufferSize(rModelPart.GetBufferSize());
rModelPart.AddNode(pNewNode);
}
else
{
// this is an old node, not required to add to model_part
// so do nothing
}
// in this way, the node will always point to the last local coodinates and element
if(type == 1)
{
mNodeToLocalCoordinates(id) = p_ref;
mNodeToElement(id) = rE.Id();
}
}
}
}
//for correct mapping to element, the repetitive node is allowed.
// rModelPart.Nodes().Unique();
#ifdef DEBUG_LEVEL1
KRATOS_WATCH(rModelPart.Nodes().size())
if(type == 1)
std::cout << "Generating Elements..." << std::endl;
else
std::cout << "Generating Conditions..." << std::endl;
#endif
// create and add element
// typename T::NodesArrayType temp_nodes;
// for(i = 0; i < NumDivision1; ++i)
// {
// for(j = 0; j < NumDivision2; ++j)
// {
// for(k = 0; k < NumDivision3; ++k)
// {
// int Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
// int Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
// int Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
// int Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
// int Node5 = Node1 + 1;
// int Node6 = Node2 + 1;
// int Node7 = Node3 + 1;
// int Node8 = Node4 + 1;
// // TODO: check if jacobian checking is necessary
// temp_nodes.clear();
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node1], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node2], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node4], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node3], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node5], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node6], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node8], NodeKey).base()));
// temp_nodes.push_back(*(FindKey(rModelPart.Nodes(), rMapToCollapseNode[Node7], NodeKey).base()));
// typename T::Pointer NewEntity = rSample.Create(++EntityCounter, temp_nodes, pDummyProperties);
// AddToModelPart<T>(rModelPart, NewEntity);
// if(type == 1)
// mOldToNewElements[rE.Id()].insert(EntityCounter);
// else if(type == 2)
// mOldToNewConditions[rE.Id()].insert(EntityCounter);
// }
// }
// }
std::vector<std::vector<IndexType> > connectivities;
for(i = 0; i < NumDivision1; ++i)
{
for(j = 0; j < NumDivision2; ++j)
{
for(k = 0; k < NumDivision3; ++k)
{
IndexType Node1 = NodeCounter_old + (i * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
IndexType Node2 = NodeCounter_old + (i * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
IndexType Node3 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j) * (NumDivision3 + 1) + k + 1;
IndexType Node4 = NodeCounter_old + ((i + 1) * (NumDivision2 + 1) + j + 1) * (NumDivision3 + 1) + k + 1;
IndexType Node5 = Node1 + 1;
IndexType Node6 = Node2 + 1;
IndexType Node7 = Node3 + 1;
IndexType Node8 = Node4 + 1;
connectivities.push_back(std::vector<IndexType>{
rMapToCollapseNode[Node1],
rMapToCollapseNode[Node2],
rMapToCollapseNode[Node4],
rMapToCollapseNode[Node3],
rMapToCollapseNode[Node5],
rMapToCollapseNode[Node6],
rMapToCollapseNode[Node8],
rMapToCollapseNode[Node7]});
}
}
}
TEntityContainerType pNewEntities = IsogeometricPostUtility::CreateEntities<std::vector<std::vector<IndexType> >, TEntityType, TEntityContainerType>(
connectivities, rModelPart, rSample, EntityCounter, pDummyProperties, NodeKey);
for (typename TEntityContainerType::ptr_iterator it2 = pNewEntities.ptr_begin(); it2 != pNewEntities.ptr_end(); ++it2)
{
AddToModelPart<TEntityType>(rModelPart, *it2);
if(type == 1)
mOldToNewElements[rE.Id()].insert((*it2)->Id());
else if(type == 2)
mOldToNewConditions[rE.Id()].insert((*it2)->Id());
}
if(type == 1)
rModelPart.Elements().Unique();
else if(type == 2)
rModelPart.Conditions().Unique();
#ifdef DEBUG_LEVEL1
if(type == 1)
KRATOS_WATCH(rModelPart.Elements().size())
else
KRATOS_WATCH(rModelPart.Conditions().size())
#endif
}
}
// Synchronize the activation between model_parts
void SynchronizeActivation(ModelPart::Pointer pModelPartPost)
{
ElementsArrayType& pElements = mpModelPart->Elements();
for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
{
pModelPartPost->GetElement(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
}
}
ConditionsArrayType& pConditions = mpModelPart->Conditions();
for (typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
{
std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
{
pModelPartPost->GetCondition(*it2).GetValue(IS_INACTIVE) = (*it)->GetValue( IS_INACTIVE );
}
}
}
// transfer the elemental data
template<class TVariableType>
void TransferElementalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
{
ElementsArrayType& pElements = mpModelPart->Elements();
for(typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
std::set<IndexType> NewElements = mOldToNewElements[(*it)->Id()];
for(std::set<IndexType>::iterator it2 = NewElements.begin(); it2 != NewElements.end(); ++it2)
{
pModelPartPost->GetElement(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
}
}
}
// transfer the conditional data
template<class TVariableType>
void TransferConditionalData(const TVariableType& rThisVariable, ModelPart::Pointer pModelPartPost)
{
ConditionsArrayType& pConditions = mpModelPart->Conditions();
for(typename ConditionsArrayType::ptr_iterator it = pConditions.ptr_begin(); it != pConditions.ptr_end(); ++it)
{
std::set<IndexType> NewConditions = mOldToNewConditions[(*it)->Id()];
for(std::set<IndexType>::iterator it2 = NewConditions.begin(); it2 != NewConditions.end(); ++it2)
{
pModelPartPost->GetCondition(*it2).GetValue(rThisVariable) = (*it)->GetValue(rThisVariable);
}
}
}
// Synchronize post model_part with the reference model_part
template<class TVariableType>
void TransferNodalResults(
const TVariableType& rThisVariable,
const ModelPart::Pointer pModelPartPost
)
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
#endif
NodesArrayType& pTargetNodes = pModelPartPost->Nodes();
ElementsArrayType& pElements = mpModelPart->Elements();
typename TVariableType::Type Results;
CoordinatesArrayType LocalPos;
IndexType ElementId;
// #pragma omp parallel for
//TODO: check this. This is not parallelized.
for(NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
{
IndexType key = (*it)->Id();
if(mNodeToElement.find(key) != mNodeToElement.end())
{
ElementId = mNodeToElement[key];
if( ! pElements(ElementId)->GetValue(IS_INACTIVE) ) // skip the inactive elements
{
noalias(LocalPos) = mNodeToLocalCoordinates[key];
Results = CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
(*it)->GetSolutionStepValue(rThisVariable) = Results;
}
}
}
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#endif
}
// Synchronize post model_part with the reference model_part
template<class TVariableType>
void TransferIntegrationPointResults(
const TVariableType& rThisVariable,
const ModelPart::Pointer pModelPartPost,
LinearSolverType::Pointer pSolver
)
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
// firstly transfer rThisVariable from integration points of reference model_part to its nodes
TransferVariablesToNodes(pSolver, mpModelPart, rThisVariable);
// secondly transfer new nodal variables results to the post model_part
TransferNodalResults(rThisVariable, pModelPartPost);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart::Pointer pModelPart,
LinearSolverType::Pointer pSolver
)
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
TransferVariablesToNodes(pSolver, pModelPart, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
/**
* Utility function to renumber the nodes of the post model_part (for parallel merge)
*/
void GlobalNodalRenumbering(ModelPart::Pointer pModelPartPost)
{
#ifdef ISOGEOMETRIC_USE_MPI
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
// gather the number of nodes on each process
int NumberOfNodes[size];
int MyNumberOfNodes = pModelPartPost->NumberOfNodes();
MPI_Allgather(&MyNumberOfNodes, 1, MPI_INT, NumberOfNodes, 1, MPI_INT, MPI_COMM_WORLD);
// std::cout << "NumberOfNodes:";
// for(int i = 0; i < size; ++i)
// std::cout << " " << NumberOfNodes[i];
// std::cout << std::endl;
// compute the numbering offset
int offset = 0;
for(int i = 0; i < rank; ++i)
offset += NumberOfNodes[i];
// renumber the nodes of the current process
for(ModelPart::NodeIterator it = pModelPartPost->NodesBegin(); it != pModelPartPost->NodesEnd(); ++it)
{
it->SetId(++offset);
it->GetSolutionStepValue(PARTITION_INDEX) = rank;
}
if(rank == 0)
std::cout << "Global renumbering completed" << std::endl;
#endif
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
std::stringstream buffer;
buffer << "BezierClassicalPostUtility";
return buffer.str();
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "BezierClassicalPostUtility";
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart::Pointer mpModelPart; // pointer variable to a model_part
VectorMap<IndexType, CoordinatesArrayType> mNodeToLocalCoordinates; // vector map to store local coordinates of node on a NURBS entity
VectorMap<IndexType, IndexType> mNodeToElement; // vector map to store local coordinates of node on a NURBS entity
std::map<IndexType, std::set<IndexType> > mOldToNewElements; // vector map to store id map from old element to new elements
std::map<IndexType, std::set<IndexType> > mOldToNewConditions; // vector map to store id map from old condition to new conditions
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* Calculate global coodinates w.r.t initial configuration
*/
CoordinatesArrayType& GlobalCoordinates(
GeometryType& rGeometry,
CoordinatesArrayType& rResult,
CoordinatesArrayType const& LocalCoordinates
)
{
noalias( rResult ) = ZeroVector( 3 );
Vector ShapeFunctionsValues;
rGeometry.ShapeFunctionsValues(ShapeFunctionsValues, LocalCoordinates);
for ( IndexType i = 0 ; i < rGeometry.size() ; ++i )
{
noalias( rResult ) += ShapeFunctionsValues( i ) * rGeometry.GetPoint( i ).GetInitialPosition();
}
return rResult;
}
/**
* Interpolation on element
*/
double CalculateOnPoint(
const Variable<double>& rVariable,
double& rResult,
Element::Pointer& pElement,
const CoordinatesArrayType& rCoordinates
)
{
Vector N;
pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);
rResult = 0.0;
for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
{
double NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
rResult += N( i ) * NodalValues;
}
return rResult;
}
/**
* Interpolation on element
*/
Vector& CalculateOnPoint(
const Variable<Vector>& rVariable,
Vector& rResult,
Element::Pointer& pElement,
const CoordinatesArrayType& rCoordinates
)
{
Vector N;
pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);
for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
{
Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
if(i == 0)
{
rResult = N( i ) * NodalValues;
}
else
{
noalias(rResult) += N( i ) * NodalValues;
}
}
return rResult;
}
/**
* Interpolation on element
*/
array_1d<double, 3>& CalculateOnPoint(
const Variable<array_1d<double, 3> >& rVariable,
array_1d<double, 3>& rResult,
Element::Pointer& pElement,
const CoordinatesArrayType& rCoordinates
)
{
Vector N;
pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);
rResult[0] = 0.0;
rResult[1] = 0.0;
rResult[2] = 0.0;
for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
{
array_1d<double, 3> NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
rResult += N( i ) * NodalValues;
}
return rResult;
}
/**
* Transfer variable at integration points to nodes
*
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable need to transfer the respected values
*/
void TransferVariablesToNodes(
LinearSolverType::Pointer& pSolver,
ModelPart::Pointer& pModelPart,
const Variable<double>& rThisVariable
)
{
ElementsArrayType& ElementsArray= pModelPart->Elements();
//Initialize system of equations
int NumberOfNodes = pModelPart->NumberOfNodes();
SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes);
noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes);
SerialSparseSpaceType::VectorType g(NumberOfNodes);
noalias(g)= ZeroVector(NumberOfNodes);
SerialSparseSpaceType::VectorType b(NumberOfNodes);
noalias(b)= ZeroVector(NumberOfNodes);
// create the structure for M a priori
ConstructL2MatrixStructure<Element>(M, ElementsArray);
// Transfer of GaussianVariables to Nodal Variables via L_2-Minimization
// see Jiao + Heath "Common-refinement-based data tranfer ..."
// International Journal for numerical methods in engineering 61 (2004) 2402--2427
// for general description of L_2-Minimization
// set up the system of equations
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
KRATOS_WATCH( number_of_threads )
std::cout << "element_partition:";
for (std::size_t i = 0; i < element_partition.size(); ++i)
std::cout << " " << element_partition[i];
std::cout << std::endl;
//create the array of lock
std::vector< omp_lock_t > lock_array(NumberOfNodes);
for(unsigned int i = 0; i < NumberOfNodes; ++i)
omp_init_lock(&lock_array[i]);
#pragma omp parallel for
for(int k = 0; k < number_of_threads; ++k)
{
Matrix InvJ(3, 3);
double DetJ;
unsigned int row, col;
typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1];
for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
if(!(*it)->GetValue(IS_INACTIVE))
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
// J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
// const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry());
J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod());
GeometryType::ShapeFunctionsGradientsType DN_De;
Matrix Ncontainer;
rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients(
Ncontainer,
DN_De,
(*it)->GetIntegrationMethod()
);
// get the values at the integration_points
std::vector<double> ValuesOnIntPoint(integration_points.size());
(*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo());
for(unsigned int point = 0; point< integration_points.size(); ++point)
{
MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ);
double dV = DetJ * integration_points[point].Weight();
for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim)
{
row = (*it)->GetGeometry()[prim].Id()-1;
omp_set_lock(&lock_array[row]);
b(row) += (ValuesOnIntPoint[point]) * Ncontainer(point, prim) * dV;
for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec)
{
col = (*it)->GetGeometry()[sec].Id()-1;
M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV;
}
omp_unset_lock(&lock_array[row]);
}
}
}
else
{
// for inactive elements the contribution to LHS is identity matrix and RHS is zero
for(unsigned int prim = 0 ; prim < (*it)->GetGeometry().size(); ++prim)
{
row = (*it)->GetGeometry()[prim].Id()-1;
omp_set_lock(&lock_array[row]);
// b(row) += 0.0;
for(unsigned int sec = 0 ; sec < (*it)->GetGeometry().size(); ++sec)
{
col = (*it)->GetGeometry()[sec].Id()-1;
if(col == row)
M(row, col) += 1.0;
// else
// M(row, col) += 0.0;
}
omp_unset_lock(&lock_array[row]);
}
}
}
}
for(unsigned int i = 0; i < NumberOfNodes; ++i)
omp_destroy_lock(&lock_array[i]);
// solver the system
pSolver->Solve(M, g, b);
// transfer the solution to the nodal variables
for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it)
{
it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1));
}
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable need to transfer the respected values
*/
void TransferVariablesToNodes(
LinearSolverType::Pointer& pSolver,
ModelPart::Pointer& pModelPart,
const Variable<Vector>& rThisVariable
)
{
ElementsArrayType& ElementsArray = pModelPart->Elements();
const unsigned int& Dim = (*(ElementsArray.ptr_begin()))->GetGeometry().WorkingSpaceDimension();
unsigned int VariableSize;
if(rThisVariable.Name() == std::string("STRESSES")
|| rThisVariable.Name() == std::string("PLASTIC_STRAIN_VECTOR")
|| rThisVariable.Name() == std::string("PRESTRESS")
|| rThisVariable.Name() == std::string("STRAIN")
// TODO: extend for more variables
)
{
VariableSize = Dim * (Dim + 1) / 2;
}
else
KRATOS_THROW_ERROR(std::logic_error, rThisVariable.Name(), " is not a supported variable for TransferVariablesToNodes routine.")
#ifdef ENABLE_PROFILING
//profiling variables
double start_compute, end_compute;
start_compute = OpenMPUtils::GetCurrentTime();
#endif
//Initialize system of equations
unsigned int NumberOfNodes = pModelPart->NumberOfNodes();
SerialSparseSpaceType::MatrixType M(NumberOfNodes, NumberOfNodes);
noalias(M)= ZeroMatrix(NumberOfNodes, NumberOfNodes);
// create the structure for M a priori
ConstructL2MatrixStructure<Element>(M, ElementsArray);
#ifdef ENABLE_PROFILING
end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "ConstructMatrixStructure completed: " << end_compute - start_compute << " s" << std::endl;
start_compute = end_compute;
#endif
SerialDenseSpaceType::MatrixType g(NumberOfNodes, VariableSize);
noalias(g)= ZeroMatrix(NumberOfNodes, VariableSize);
SerialDenseSpaceType::MatrixType b(NumberOfNodes, VariableSize);
noalias(b)= ZeroMatrix(NumberOfNodes, VariableSize);
std::vector< omp_lock_t > lock_array(NumberOfNodes);
for(unsigned int i = 0; i < NumberOfNodes; ++i)
omp_init_lock(&lock_array[i]);
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
KRATOS_WATCH( number_of_threads )
std::cout << "element_partition:";
for (std::size_t i = 0; i < element_partition.size(); ++i)
std::cout << " " << element_partition[i];
std::cout << std::endl;
#pragma omp parallel for
for(int k = 0; k < number_of_threads; ++k)
{
Matrix InvJ(Dim, Dim);
double DetJ;
unsigned int row, col;
typename ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k + 1];
for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
{
if(!(*it)->GetValue(IS_INACTIVE))
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
// J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
// const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
IsogeometricGeometryType& rIsogeometricGeometry = dynamic_cast<IsogeometricGeometryType&>((*it)->GetGeometry());
J = rIsogeometricGeometry.Jacobian0(J, (*it)->GetIntegrationMethod());
GeometryType::ShapeFunctionsGradientsType DN_De;
Matrix Ncontainer;
rIsogeometricGeometry.CalculateShapeFunctionsIntegrationPointsValuesAndLocalGradients(
Ncontainer,
DN_De,
(*it)->GetIntegrationMethod()
);
// get the values at the integration_points
std::vector<Vector> ValuesOnIntPoint(integration_points.size());
(*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, pModelPart->GetProcessInfo());
for(unsigned int point = 0; point < integration_points.size(); ++point)
{
MathUtils<double>::InvertMatrix(J[point], InvJ, DetJ);
double dV = DetJ * integration_points[point].Weight();
for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim)
{
row = (*it)->GetGeometry()[prim].Id() - 1;
omp_set_lock(&lock_array[row]);
for(unsigned int i = 0; i < VariableSize; ++i)
b(row, i) += ValuesOnIntPoint[point][i] * Ncontainer(point, prim) * dV;
for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec)
{
col = (*it)->GetGeometry()[sec].Id() - 1;
M(row, col) += Ncontainer(point, prim) * Ncontainer(point, sec) * dV;
}
omp_unset_lock(&lock_array[row]);
}
}
}
else
{
// for inactive elements the contribution to LHS is identity matrix and RHS is zero
for(unsigned int prim = 0; prim < (*it)->GetGeometry().size(); ++prim)
{
row = (*it)->GetGeometry()[prim].Id() - 1;
omp_set_lock(&lock_array[row]);
// for(unsigned int i = 0; i < VariableSize; ++i)
// b(row, i) += 0.0;
for(unsigned int sec = 0; sec < (*it)->GetGeometry().size(); ++sec)
{
col = (*it)->GetGeometry()[sec].Id() - 1;
if(col == row)
M(row, col) += 1.0;
// else
// M(row, col) += 0.0;
}
omp_unset_lock(&lock_array[row]);
}
}
}
}
for(unsigned int i = 0; i < NumberOfNodes; ++i)
omp_destroy_lock(&lock_array[i]);
#ifdef ENABLE_PROFILING
end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Assemble the matrix completed: " << end_compute - start_compute << " s" << std::endl;
start_compute = end_compute;
#endif
#ifdef DEBUG_MULTISOLVE
KRATOS_WATCH(M)
KRATOS_WATCH(b)
KRATOS_WATCH(*pSolver)
#endif
// solve the system
// solver must support the multisove method
pSolver->Solve(M, g, b);
#ifdef DEBUG_MULTISOLVE
KRATOS_WATCH(g)
#endif
// transfer the solution to the nodal variables
for(ModelPart::NodeIterator it = pModelPart->NodesBegin(); it != pModelPart->NodesEnd(); ++it)
{
Vector tmp(VariableSize);
for(unsigned int i = 0; i < VariableSize; ++i)
{
tmp(i) = g((it->Id()-1), i);
}
it->GetSolutionStepValue(rThisVariable) = tmp;
}
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
BezierClassicalPostUtility& operator=(BezierClassicalPostUtility const& rOther)
{
return *this;
}
/// Copy constructor.
BezierClassicalPostUtility(BezierClassicalPostUtility const& rOther)
{
}
///@}
}; // Class BezierClassicalPostUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >>(std::istream& rIStream, BezierClassicalPostUtility& rThis)
{
return rIStream;
}
/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream,
const BezierClassicalPostUtility& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}// namespace Kratos.
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING
#endif
|
GB_binop__plus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int32)
// A*D function (colscale): GB (_AxD__plus_int32)
// D*A function (rowscale): GB (_DxB__plus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int32)
// C=scalar+B GB (_bind1st__plus_int32)
// C=scalar+B' GB (_bind1st_tran__plus_int32)
// C=A+scalar GB (_bind2nd__plus_int32)
// C=A'+scalar GB (_bind2nd_tran__plus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT32 || GxB_NO_PLUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_uint16
// op(A') function: GB_tran__minv_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int64_uint16
(
int64_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_omp.c | /**********************************
* Programmer: Dimitrios Gkoulis *
* Email: gkould@gmail.com *
* Harokopio University of Athens *
* Parallel Computing & Algorithms *
* Project - Winter Semester *
* Period: 2015 - 2016 *
* IDE: Dec-C++ (4.9.0.2) *
***********************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define I_SIZE 100 // Input Size y (length) = x (length)
#define H_SIZE 3 // Mask Size y (length) = x (length)
#define SAVETOFILE 1 // Save to file flag
/* F U N C T I O N S D E C L A R A T I O N S */
int convolution(int I[I_SIZE][I_SIZE], int h[H_SIZE][H_SIZE], int y, int x); // CALCULATE CONVOLUTION (FUNCTION)
void save_to_file(int A[I_SIZE][I_SIZE]); // SAVE TO FILE (FUNCTION)
/* M A I N */
int main(int argc, char *argv[])
{
// Input
int I[I_SIZE][I_SIZE] = {
{ 139, 13, 92, 251, 137, 124, 233, 91, 75, 11, 188, 184, 191, 118, 41, 228, 21, 93, 112, 51, 131, 141, 160, 76, 219, 211, 26, 241, 163, 140, 162, 195, 250, 227, 237, 217, 127, 148, 190, 89, 169, 243, 212, 135, 130, 33, 221, 234, 246, 200, 3, 120, 84, 208, 60, 152, 69, 155, 6, 34, 180, 240, 31, 99, 142, 88, 73, 201, 52, 83, 78, 186, 226, 229, 179, 214, 159, 38, 147, 101, 225, 207, 105, 167, 235, 10, 174, 98, 58, 59, 39, 61, 108, 70, 80, 9, 145, 48, 50, 44 },
{ 54, 109, 78, 229, 85, 118, 164, 49, 153, 76, 165, 114, 158, 228, 112, 113, 156, 183, 254, 3, 230, 132, 198, 64, 90, 130, 223, 115, 190, 18, 201, 70, 124, 125, 245, 150, 189, 253, 143, 72, 239, 119, 33, 120, 82, 181, 251, 172, 57, 6, 71, 166, 2, 131, 15, 58, 101, 117, 176, 152, 51, 135, 180, 249, 137, 11, 216, 133, 22, 232, 204, 157, 184, 111, 202, 47, 177, 182, 147, 191, 162, 227, 175, 84, 211, 79, 142, 243, 46, 195, 94, 188, 159, 171, 29, 185, 214, 169, 20, 238 },
{ 76, 188, 133, 42, 58, 128, 149, 253, 239, 114, 159, 74, 125, 105, 176, 40, 119, 198, 175, 26, 86, 20, 223, 214, 168, 204, 19, 15, 142, 80, 157, 6, 43, 232, 206, 230, 192, 242, 21, 124, 32, 217, 184, 127, 9, 189, 209, 254, 118, 112, 233, 140, 194, 144, 5, 95, 248, 181, 251, 1, 94, 73, 250, 49, 255, 85, 99, 82, 56, 236, 8, 39, 226, 69, 207, 107, 190, 171, 31, 103, 235, 154, 93, 155, 137, 14, 247, 59, 195, 132, 136, 174, 165, 3, 79, 224, 185, 151, 11, 104 },
{ 92, 178, 245, 180, 115, 88, 208, 134, 32, 204, 206, 182, 50, 129, 24, 221, 90, 230, 0, 252, 176, 251, 228, 80, 244, 51, 15, 22, 17, 83, 27, 52, 146, 168, 105, 58, 31, 13, 114, 184, 57, 53, 222, 56, 153, 69, 40, 93, 65, 71, 249, 186, 158, 63, 139, 9, 229, 133, 96, 28, 211, 130, 159, 171, 140, 166, 203, 116, 121, 219, 30, 73, 161, 55, 11, 2, 46, 141, 138, 197, 238, 164, 232, 199, 109, 98, 102, 179, 86, 196, 152, 118, 216, 87, 29, 205, 72, 254, 195, 119 },
{ 198, 115, 218, 111, 17, 131, 56, 139, 86, 182, 39, 157, 235, 85, 120, 180, 227, 232, 118, 109, 83, 188, 249, 103, 59, 213, 78, 55, 29, 207, 89, 72, 106, 75, 238, 28, 14, 158, 79, 7, 191, 34, 145, 236, 143, 166, 203, 151, 146, 40, 154, 160, 152, 6, 159, 81, 130, 18, 19, 161, 239, 132, 42, 171, 178, 65, 97, 123, 126, 181, 88, 174, 149, 219, 22, 195, 153, 77, 140, 200, 138, 205, 71, 122, 116, 94, 189, 61, 54, 177, 121, 48, 100, 87, 199, 231, 224, 13, 184, 164 },
{ 180, 250, 104, 194, 235, 136, 2, 242, 138, 24, 209, 119, 210, 38, 155, 203, 251, 248, 149, 118, 247, 19, 230, 44, 1, 17, 202, 69, 191, 106, 131, 238, 43, 243, 221, 47, 100, 123, 222, 90, 253, 6, 246, 84, 229, 193, 228, 255, 142, 18, 133, 154, 132, 101, 3, 48, 117, 165, 205, 21, 86, 156, 254, 34, 109, 50, 158, 240, 4, 75, 170, 169, 92, 80, 147, 245, 63, 77, 204, 83, 11, 49, 68, 13, 236, 130, 112, 179, 76, 174, 167, 216, 116, 70, 88, 159, 129, 87, 22, 173 },
{ 45, 99, 115, 242, 110, 189, 164, 94, 114, 183, 181, 154, 151, 91, 209, 4, 78, 198, 72, 203, 116, 188, 232, 69, 157, 136, 106, 177, 7, 245, 1, 55, 105, 75, 205, 14, 184, 210, 216, 51, 165, 250, 5, 220, 173, 231, 58, 118, 86, 130, 97, 56, 153, 61, 64, 127, 156, 140, 186, 122, 191, 132, 201, 57, 168, 26, 175, 125, 227, 162, 79, 62, 172, 120, 146, 104, 143, 174, 102, 121, 155, 17, 89, 71, 28, 96, 126, 170, 43, 63, 111, 249, 195, 207, 33, 9, 196, 108, 135, 25 },
{ 189, 239, 219, 14, 226, 179, 158, 89, 111, 102, 87, 59, 6, 142, 62, 53, 30, 1, 136, 180, 120, 133, 5, 190, 162, 90, 36, 94, 191, 241, 11, 24, 58, 116, 103, 38, 143, 170, 74, 152, 188, 65, 48, 176, 249, 145, 251, 29, 245, 3, 15, 52, 92, 19, 124, 97, 139, 236, 151, 221, 27, 68, 72, 193, 140, 244, 225, 26, 0, 115, 206, 232, 131, 35, 69, 4, 98, 197, 215, 113, 246, 172, 42, 93, 196, 178, 216, 213, 12, 198, 211, 71, 203, 122, 218, 167, 148, 242, 223, 163 },
{ 172, 216, 56, 50, 250, 182, 79, 113, 132, 130, 83, 30, 179, 213, 149, 37, 196, 4, 19, 73, 20, 99, 251, 34, 173, 185, 71, 74, 158, 84, 162, 86, 26, 141, 112, 120, 106, 62, 63, 40, 204, 38, 33, 49, 118, 111, 226, 138, 124, 2, 80, 201, 41, 247, 36, 234, 150, 61, 85, 253, 110, 176, 171, 167, 224, 170, 188, 151, 193, 1, 115, 145, 109, 184, 232, 223, 144, 51, 187, 43, 48, 94, 65, 215, 127, 135, 29, 248, 212, 221, 235, 202, 72, 67, 131, 52, 21, 210, 23, 92 },
{ 9, 211, 28, 182, 226, 206, 13, 189, 167, 125, 183, 177, 104, 180, 221, 173, 53, 151, 11, 40, 4, 45, 88, 209, 143, 95, 247, 215, 69, 158, 46, 37, 126, 196, 92, 244, 103, 201, 141, 10, 121, 179, 112, 56, 223, 98, 34, 87, 222, 120, 102, 156, 90, 153, 64, 22, 140, 75, 216, 57, 99, 44, 139, 229, 17, 163, 131, 255, 157, 5, 24, 200, 67, 248, 114, 249, 164, 191, 117, 224, 55, 52, 32, 89, 49, 38, 218, 73, 193, 148, 101, 59, 252, 159, 155, 178, 192, 169, 186, 107 },
{ 94, 16, 122, 204, 188, 55, 238, 39, 173, 85, 7, 79, 0, 44, 234, 159, 243, 250, 246, 33, 236, 175, 25, 58, 57, 24, 182, 150, 224, 218, 249, 70, 103, 242, 194, 15, 132, 74, 36, 52, 232, 255, 136, 212, 87, 23, 115, 252, 59, 206, 30, 130, 195, 111, 149, 26, 251, 64, 50, 80, 139, 104, 223, 189, 17, 69, 29, 46, 138, 193, 38, 62, 47, 186, 35, 133, 49, 207, 214, 166, 145, 183, 169, 253, 8, 220, 235, 228, 73, 248, 200, 165, 164, 2, 6, 101, 37, 88, 140, 14 },
{ 118, 139, 179, 58, 148, 215, 245, 227, 32, 87, 206, 200, 128, 36, 230, 183, 216, 201, 69, 41, 210, 110, 39, 173, 74, 37, 140, 137, 154, 223, 244, 164, 246, 101, 145, 12, 138, 172, 16, 43, 180, 8, 255, 232, 247, 21, 238, 249, 20, 100, 211, 104, 240, 239, 83, 14, 10, 151, 160, 22, 105, 78, 33, 199, 103, 116, 212, 68, 96, 55, 102, 80, 49, 135, 217, 233, 98, 189, 9, 202, 108, 144, 111, 177, 59, 159, 153, 0, 48, 196, 3, 112, 147, 155, 134, 191, 60, 143, 146, 150 },
{ 123, 115, 207, 198, 232, 170, 92, 129, 58, 62, 169, 135, 29, 69, 11, 51, 70, 174, 111, 184, 20, 200, 30, 239, 244, 89, 212, 154, 42, 34, 83, 6, 44, 52, 31, 195, 160, 176, 53, 94, 71, 156, 102, 163, 56, 139, 65, 90, 225, 243, 150, 84, 60, 72, 179, 192, 101, 12, 146, 229, 100, 218, 109, 48, 223, 122, 248, 78, 33, 140, 238, 190, 167, 158, 241, 3, 133, 35, 205, 38, 242, 49, 250, 157, 177, 32, 209, 172, 142, 159, 219, 240, 247, 204, 82, 217, 77, 221, 185, 253 },
{ 209, 236, 108, 235, 93, 205, 146, 203, 152, 154, 158, 77, 89, 175, 38, 241, 242, 248, 160, 111, 163, 170, 147, 208, 110, 244, 50, 75, 62, 189, 181, 104, 88, 195, 134, 143, 253, 153, 218, 135, 144, 92, 127, 204, 166, 174, 219, 162, 2, 115, 30, 114, 151, 55, 24, 149, 70, 15, 82, 145, 106, 124, 61, 179, 96, 107, 112, 148, 45, 165, 243, 29, 183, 250, 187, 254, 100, 225, 25, 102, 198, 180, 185, 109, 21, 41, 126, 150, 226, 20, 238, 119, 159, 245, 217, 233, 86, 43, 184, 211 },
{ 81, 194, 56, 92, 223, 110, 203, 42, 95, 122, 255, 101, 2, 117, 22, 185, 159, 214, 25, 215, 127, 227, 176, 136, 16, 7, 15, 84, 188, 23, 213, 245, 130, 182, 254, 175, 156, 106, 243, 118, 59, 28, 149, 228, 27, 99, 60, 6, 250, 237, 49, 216, 140, 102, 248, 48, 121, 74, 231, 89, 181, 162, 148, 252, 119, 180, 3, 239, 0, 76, 9, 179, 105, 224, 172, 57, 77, 86, 31, 218, 64, 79, 146, 145, 192, 38, 111, 167, 35, 154, 229, 58, 124, 199, 155, 246, 109, 125, 62, 39 },
{ 191, 22, 41, 53, 27, 11, 168, 52, 101, 18, 145, 132, 36, 93, 121, 102, 199, 180, 33, 99, 67, 63, 200, 165, 59, 24, 136, 92, 198, 207, 23, 106, 73, 196, 118, 238, 228, 94, 176, 21, 221, 250, 96, 231, 20, 39, 185, 76, 151, 245, 61, 249, 26, 77, 240, 128, 113, 242, 135, 236, 42, 156, 83, 44, 129, 10, 17, 218, 169, 19, 212, 224, 50, 127, 211, 192, 115, 7, 157, 6, 55, 15, 46, 248, 237, 220, 105, 166, 109, 107, 148, 170, 159, 114, 160, 86, 215, 230, 183, 252 },
{ 91, 182, 176, 165, 101, 30, 88, 1, 7, 239, 120, 77, 207, 138, 107, 150, 16, 124, 156, 130, 229, 81, 115, 83, 118, 151, 210, 213, 119, 11, 29, 131, 187, 127, 129, 97, 70, 186, 35, 236, 95, 117, 102, 3, 143, 240, 0, 198, 108, 22, 215, 19, 53, 112, 45, 42, 214, 32, 73, 202, 6, 89, 164, 116, 72, 188, 59, 157, 25, 14, 126, 33, 252, 163, 234, 178, 71, 248, 179, 41, 109, 228, 46, 121, 154, 44, 85, 167, 155, 15, 136, 142, 103, 79, 48, 253, 105, 84, 78, 87 },
{ 14, 76, 159, 62, 67, 180, 39, 86, 61, 170, 155, 187, 34, 64, 109, 87, 183, 21, 84, 70, 201, 130, 117, 29, 36, 27, 161, 221, 114, 234, 208, 73, 44, 231, 97, 60, 230, 215, 181, 207, 243, 237, 128, 17, 251, 124, 252, 126, 5, 9, 168, 55, 37, 93, 160, 222, 30, 112, 174, 144, 65, 131, 211, 203, 85, 175, 119, 52, 89, 16, 0, 188, 120, 156, 157, 164, 152, 48, 66, 57, 22, 218, 241, 205, 151, 1, 15, 240, 80, 94, 45, 228, 206, 204, 118, 173, 150, 74, 106, 41 },
{ 11, 33, 44, 45, 78, 25, 60, 130, 62, 195, 197, 114, 117, 233, 116, 230, 121, 194, 138, 8, 246, 199, 208, 53, 203, 242, 236, 150, 157, 212, 107, 224, 140, 153, 243, 170, 56, 143, 168, 235, 251, 5, 92, 7, 9, 122, 160, 148, 71, 29, 186, 155, 104, 141, 202, 207, 215, 248, 253, 31, 85, 196, 169, 35, 135, 146, 58, 154, 216, 231, 39, 34, 227, 28, 180, 97, 179, 87, 63, 41, 105, 70, 252, 118, 144, 225, 0, 37, 172, 67, 139, 219, 220, 149, 185, 136, 128, 159, 244, 250 },
{ 124, 22, 177, 103, 216, 97, 186, 42, 143, 225, 125, 134, 153, 1, 71, 172, 230, 117, 190, 213, 82, 38, 205, 68, 215, 250, 13, 12, 131, 226, 237, 11, 148, 123, 67, 107, 146, 49, 56, 28, 61, 100, 142, 152, 55, 79, 206, 39, 191, 135, 204, 171, 109, 16, 129, 232, 127, 35, 234, 162, 3, 47, 203, 244, 202, 175, 130, 179, 6, 87, 84, 207, 121, 247, 233, 53, 137, 120, 160, 140, 180, 245, 31, 72, 102, 188, 166, 193, 183, 43, 253, 40, 89, 65, 4, 30, 0, 85, 106, 150 },
{ 220, 245, 128, 195, 152, 233, 187, 122, 41, 196, 63, 151, 24, 52, 142, 237, 181, 44, 143, 218, 215, 211, 235, 106, 75, 97, 83, 118, 251, 90, 101, 68, 23, 92, 209, 184, 7, 236, 61, 162, 219, 253, 250, 159, 64, 139, 46, 246, 224, 157, 116, 134, 188, 174, 252, 103, 135, 197, 173, 45, 42, 25, 255, 129, 56, 193, 55, 247, 141, 115, 199, 2, 154, 200, 254, 126, 191, 147, 206, 194, 105, 161, 34, 222, 88, 185, 32, 148, 179, 145, 112, 0, 33, 221, 225, 146, 53, 77, 20, 153 },
{ 144, 27, 6, 29, 85, 42, 179, 176, 200, 198, 161, 58, 46, 149, 51, 160, 252, 59, 89, 238, 132, 246, 165, 172, 151, 215, 55, 240, 61, 131, 229, 116, 212, 30, 145, 150, 194, 105, 104, 141, 99, 196, 130, 169, 204, 137, 60, 234, 20, 216, 114, 156, 48, 90, 110, 9, 118, 112, 120, 82, 164, 250, 75, 38, 171, 255, 79, 1, 192, 126, 2, 193, 76, 54, 37, 111, 226, 69, 109, 22, 236, 254, 122, 233, 47, 136, 138, 189, 32, 227, 7, 139, 129, 230, 184, 117, 67, 103, 124, 15 },
{ 153, 152, 134, 24, 92, 192, 86, 106, 28, 11, 119, 208, 103, 143, 109, 198, 135, 18, 48, 44, 83, 61, 118, 64, 0, 120, 104, 75, 231, 52, 95, 193, 157, 214, 240, 252, 255, 46, 161, 128, 146, 30, 147, 126, 200, 58, 164, 243, 191, 131, 56, 182, 150, 145, 39, 82, 180, 237, 123, 105, 163, 7, 175, 77, 139, 241, 206, 225, 155, 31, 8, 242, 154, 3, 89, 132, 141, 54, 99, 235, 34, 186, 36, 102, 68, 223, 14, 67, 121, 63, 188, 203, 159, 179, 209, 122, 215, 246, 171, 124 },
{ 180, 244, 141, 119, 65, 11, 46, 240, 38, 2, 231, 163, 222, 234, 113, 31, 79, 183, 48, 145, 60, 55, 42, 144, 92, 102, 224, 218, 7, 203, 159, 158, 44, 176, 196, 59, 26, 182, 106, 97, 200, 212, 170, 51, 219, 127, 186, 206, 64, 174, 235, 137, 167, 111, 162, 230, 213, 19, 74, 12, 133, 28, 136, 69, 88, 112, 147, 245, 173, 223, 29, 34, 10, 198, 99, 216, 148, 18, 160, 117, 197, 53, 187, 58, 232, 54, 9, 45, 100, 166, 195, 109, 155, 143, 50, 128, 150, 84, 194, 8 },
{ 175, 177, 59, 162, 129, 146, 219, 106, 84, 168, 30, 225, 62, 153, 22, 98, 220, 247, 42, 40, 110, 189, 197, 236, 142, 60, 141, 233, 51, 240, 94, 97, 53, 164, 74, 31, 132, 179, 139, 226, 248, 200, 156, 207, 244, 208, 121, 227, 23, 221, 118, 103, 64, 204, 136, 192, 34, 217, 229, 33, 127, 89, 211, 7, 66, 145, 28, 245, 83, 214, 180, 222, 199, 172, 76, 193, 56, 249, 195, 87, 243, 216, 18, 166, 115, 50, 223, 19, 203, 176, 57, 239, 120, 152, 252, 144, 242, 67, 235, 63 },
{ 12, 147, 246, 157, 66, 136, 76, 41, 141, 121, 241, 20, 6, 227, 143, 107, 37, 210, 83, 154, 85, 109, 133, 44, 48, 10, 77, 1, 245, 108, 213, 194, 138, 217, 40, 160, 253, 65, 103, 226, 184, 169, 206, 239, 17, 130, 225, 251, 211, 32, 23, 149, 172, 179, 93, 254, 191, 222, 74, 127, 139, 248, 205, 16, 54, 110, 152, 7, 201, 161, 24, 96, 204, 81, 207, 236, 18, 192, 56, 71, 100, 104, 86, 84, 250, 237, 19, 3, 90, 195, 228, 45, 8, 94, 5, 166, 30, 144, 212, 72 },
{ 185, 34, 56, 115, 210, 71, 7, 38, 160, 197, 196, 254, 73, 241, 242, 112, 201, 87, 136, 187, 198, 101, 21, 164, 177, 36, 176, 221, 168, 43, 25, 128, 170, 93, 86, 150, 191, 156, 179, 233, 64, 29, 240, 190, 5, 91, 158, 72, 23, 245, 223, 44, 208, 123, 237, 200, 74, 40, 153, 141, 205, 209, 246, 51, 138, 203, 47, 239, 247, 17, 131, 222, 121, 37, 120, 2, 161, 117, 135, 193, 192, 81, 99, 204, 0, 80, 67, 213, 9, 31, 219, 59, 148, 188, 243, 165, 214, 146, 52, 60 },
{ 116, 135, 66, 227, 138, 149, 211, 93, 163, 200, 104, 85, 155, 108, 38, 8, 51, 82, 216, 21, 251, 126, 196, 154, 185, 76, 183, 249, 112, 64, 225, 62, 9, 181, 125, 61, 240, 63, 114, 16, 134, 221, 214, 80, 56, 30, 190, 59, 224, 18, 102, 27, 152, 219, 151, 31, 115, 83, 79, 50, 3, 188, 122, 177, 206, 231, 229, 39, 49, 32, 218, 180, 98, 145, 143, 255, 235, 133, 92, 67, 26, 230, 43, 203, 212, 91, 168, 44, 186, 205, 48, 144, 160, 150, 167, 74, 136, 173, 94, 195 },
{ 173, 152, 185, 206, 116, 218, 248, 59, 147, 92, 33, 176, 45, 226, 232, 192, 80, 113, 242, 63, 102, 67, 181, 60, 8, 84, 203, 69, 150, 187, 228, 177, 0, 167, 201, 190, 128, 239, 157, 196, 165, 110, 78, 86, 37, 62, 76, 156, 174, 194, 253, 182, 238, 126, 212, 236, 106, 98, 39, 195, 48, 180, 131, 22, 223, 18, 55, 108, 77, 24, 47, 12, 233, 205, 251, 5, 127, 217, 44, 244, 164, 11, 38, 1, 130, 34, 52, 249, 120, 49, 136, 112, 183, 42, 230, 240, 43, 186, 90, 124 },
{ 142, 226, 115, 169, 243, 50, 129, 136, 196, 188, 102, 1, 48, 4, 59, 203, 75, 125, 98, 215, 15, 95, 210, 80, 232, 168, 223, 254, 69, 239, 175, 84, 87, 225, 34, 28, 208, 62, 66, 77, 122, 97, 146, 25, 58, 30, 147, 42, 153, 24, 163, 119, 165, 107, 199, 6, 41, 202, 174, 23, 124, 205, 19, 81, 255, 143, 167, 180, 2, 154, 214, 151, 121, 171, 126, 113, 123, 60, 85, 184, 96, 104, 155, 190, 200, 57, 187, 245, 182, 73, 89, 16, 185, 191, 159, 71, 128, 249, 40, 217 },
{ 137, 59, 75, 114, 131, 248, 48, 39, 88, 203, 123, 3, 103, 98, 115, 153, 180, 234, 194, 85, 141, 16, 187, 108, 41, 8, 63, 4, 163, 25, 193, 160, 185, 32, 135, 116, 207, 201, 182, 168, 215, 140, 179, 143, 208, 238, 19, 214, 162, 247, 191, 254, 34, 161, 27, 11, 159, 239, 249, 229, 158, 101, 128, 0, 20, 53, 122, 232, 112, 183, 237, 155, 144, 236, 43, 55, 223, 198, 125, 83, 118, 233, 13, 250, 136, 149, 197, 184, 51, 69, 139, 190, 92, 132, 36, 65, 56, 93, 33, 47 },
{ 2, 37, 157, 183, 168, 59, 10, 113, 7, 199, 26, 219, 127, 134, 181, 0, 162, 44, 159, 245, 105, 69, 148, 173, 13, 82, 214, 16, 169, 165, 255, 21, 135, 58, 50, 170, 176, 205, 22, 191, 115, 41, 241, 139, 51, 244, 36, 182, 109, 80, 94, 224, 243, 11, 151, 62, 178, 254, 203, 251, 247, 18, 198, 86, 23, 221, 73, 144, 76, 20, 79, 154, 72, 33, 15, 64, 39, 32, 8, 202, 129, 249, 29, 104, 225, 52, 78, 85, 28, 236, 114, 77, 210, 108, 142, 201, 27, 106, 250, 57 },
{ 144, 250, 154, 8, 68, 10, 131, 222, 198, 246, 71, 143, 126, 227, 78, 172, 233, 155, 62, 220, 204, 91, 237, 2, 61, 6, 16, 98, 105, 145, 52, 152, 177, 219, 191, 133, 149, 48, 252, 208, 51, 211, 99, 207, 56, 229, 203, 114, 195, 96, 35, 102, 248, 130, 37, 251, 80, 38, 146, 205, 231, 196, 33, 26, 174, 181, 218, 139, 254, 118, 66, 41, 55, 87, 223, 202, 184, 175, 53, 125, 111, 110, 17, 141, 106, 164, 79, 166, 238, 163, 100, 221, 49, 74, 3, 234, 59, 228, 189, 31 },
{ 95, 22, 152, 75, 121, 117, 3, 91, 66, 168, 112, 243, 27, 217, 122, 176, 98, 53, 43, 131, 251, 96, 62, 44, 242, 36, 115, 236, 202, 60, 161, 47, 129, 46, 119, 238, 2, 224, 154, 138, 229, 118, 190, 208, 125, 180, 197, 71, 35, 219, 38, 223, 63, 204, 31, 58, 68, 136, 23, 107, 194, 37, 234, 114, 169, 82, 1, 185, 150, 42, 227, 10, 120, 244, 165, 231, 157, 80, 147, 12, 226, 128, 205, 159, 78, 4, 175, 26, 188, 148, 109, 106, 248, 32, 99, 29, 93, 225, 81, 149 },
{ 148, 96, 11, 4, 189, 64, 146, 75, 18, 106, 234, 114, 88, 204, 214, 209, 160, 19, 22, 250, 83, 169, 105, 153, 110, 152, 17, 233, 206, 74, 131, 228, 103, 90, 144, 130, 94, 35, 185, 102, 240, 61, 92, 168, 27, 203, 86, 197, 190, 182, 68, 162, 172, 166, 79, 95, 236, 87, 89, 119, 66, 71, 175, 229, 33, 217, 205, 163, 101, 194, 244, 57, 134, 154, 76, 38, 104, 136, 8, 15, 97, 150, 237, 31, 107, 158, 6, 159, 51, 174, 220, 49, 85, 13, 42, 77, 230, 0, 113, 82 },
{ 98, 200, 25, 154, 83, 125, 224, 146, 30, 128, 188, 18, 238, 104, 96, 72, 144, 160, 43, 91, 131, 121, 107, 73, 100, 230, 42, 126, 223, 111, 19, 218, 80, 174, 178, 87, 202, 94, 34, 76, 136, 92, 179, 182, 159, 60, 152, 59, 247, 70, 41, 145, 197, 209, 236, 17, 90, 219, 52, 210, 21, 233, 93, 176, 251, 220, 194, 28, 133, 189, 95, 190, 106, 208, 99, 11, 212, 240, 109, 10, 62, 185, 217, 157, 1, 201, 84, 134, 69, 47, 161, 129, 158, 27, 54, 193, 46, 135, 221, 22 },
{ 108, 249, 83, 178, 181, 191, 182, 23, 105, 241, 213, 232, 146, 211, 113, 123, 9, 128, 59, 32, 51, 104, 114, 61, 159, 189, 107, 28, 78, 137, 35, 165, 176, 12, 180, 112, 148, 24, 76, 81, 120, 101, 102, 71, 22, 44, 223, 85, 228, 77, 205, 47, 84, 136, 143, 65, 88, 172, 253, 40, 225, 239, 226, 244, 169, 125, 215, 103, 16, 162, 231, 217, 197, 115, 91, 73, 66, 224, 230, 171, 7, 6, 204, 140, 147, 208, 48, 175, 164, 60, 157, 74, 117, 154, 129, 43, 173, 156, 219, 96 },
{ 170, 114, 209, 3, 199, 40, 153, 4, 54, 108, 123, 215, 22, 135, 141, 173, 203, 207, 88, 110, 221, 79, 146, 99, 254, 124, 240, 120, 104, 132, 211, 78, 151, 100, 38, 83, 11, 206, 150, 138, 64, 175, 52, 159, 48, 177, 247, 205, 81, 50, 219, 41, 200, 232, 230, 184, 224, 213, 0, 37, 82, 46, 17, 239, 93, 156, 87, 139, 241, 152, 58, 250, 208, 102, 178, 85, 7, 245, 23, 6, 9, 128, 130, 194, 169, 74, 21, 59, 166, 210, 220, 69, 61, 147, 77, 204, 243, 105, 89, 101 },
{ 147, 254, 53, 173, 239, 189, 142, 225, 192, 47, 109, 68, 155, 93, 57, 4, 135, 56, 27, 108, 31, 58, 251, 194, 154, 15, 88, 227, 128, 235, 67, 210, 46, 172, 157, 242, 22, 188, 171, 130, 131, 166, 52, 19, 11, 200, 24, 110, 123, 62, 161, 55, 217, 222, 29, 95, 167, 231, 238, 21, 42, 229, 246, 1, 76, 101, 203, 237, 241, 74, 216, 84, 80, 119, 48, 105, 253, 71, 14, 116, 72, 178, 205, 226, 60, 117, 138, 9, 153, 91, 136, 99, 182, 118, 0, 139, 159, 66, 212, 35 },
{ 193, 62, 172, 123, 94, 253, 213, 241, 97, 104, 160, 131, 252, 158, 31, 198, 181, 205, 201, 25, 163, 173, 8, 95, 164, 233, 6, 91, 41, 229, 215, 254, 14, 222, 105, 117, 176, 137, 65, 211, 80, 135, 177, 68, 157, 168, 18, 103, 28, 47, 110, 99, 69, 239, 81, 40, 132, 149, 183, 169, 7, 190, 75, 21, 143, 44, 246, 166, 50, 237, 42, 78, 151, 9, 226, 46, 4, 54, 203, 214, 101, 53, 240, 189, 152, 34, 243, 88, 186, 3, 126, 43, 84, 180, 73, 154, 2, 11, 55, 15 },
{ 19, 45, 212, 9, 88, 194, 252, 121, 203, 27, 109, 117, 224, 20, 195, 127, 130, 223, 66, 34, 169, 233, 216, 159, 93, 180, 98, 92, 99, 10, 210, 90, 102, 191, 94, 225, 181, 108, 245, 142, 238, 103, 192, 237, 228, 24, 22, 35, 6, 176, 46, 187, 201, 250, 214, 33, 87, 111, 172, 186, 126, 236, 57, 62, 80, 31, 145, 232, 124, 122, 104, 134, 240, 14, 70, 217, 48, 199, 30, 21, 75, 43, 254, 106, 85, 41, 207, 234, 60, 110, 56, 119, 36, 50, 219, 167, 178, 189, 183, 200 },
{ 250, 17, 33, 157, 231, 245, 227, 156, 160, 30, 88, 63, 199, 161, 78, 218, 138, 12, 1, 191, 244, 208, 62, 77, 54, 120, 221, 252, 222, 176, 10, 164, 89, 81, 184, 76, 144, 107, 99, 40, 246, 47, 67, 79, 48, 251, 13, 183, 249, 45, 44, 32, 97, 121, 152, 159, 38, 132, 20, 189, 9, 243, 73, 140, 168, 235, 142, 26, 216, 35, 65, 225, 146, 209, 58, 217, 8, 151, 83, 3, 125, 93, 22, 34, 233, 167, 181, 106, 55, 213, 24, 4, 112, 197, 149, 177, 23, 133, 198, 39 },
{ 100, 98, 70, 253, 150, 241, 42, 165, 88, 123, 144, 30, 169, 220, 138, 80, 90, 234, 106, 189, 207, 26, 109, 47, 247, 224, 164, 163, 214, 61, 40, 217, 79, 0, 167, 200, 92, 134, 16, 112, 255, 45, 91, 44, 181, 56, 125, 53, 152, 71, 186, 81, 37, 19, 118, 97, 196, 38, 129, 76, 120, 133, 104, 124, 25, 151, 162, 35, 140, 111, 34, 12, 22, 48, 7, 108, 13, 121, 122, 205, 136, 46, 132, 119, 130, 87, 147, 105, 197, 160, 23, 248, 1, 2, 171, 168, 58, 222, 116, 17 },
{ 109, 247, 174, 252, 133, 19, 192, 97, 132, 80, 65, 13, 56, 54, 179, 74, 82, 204, 157, 146, 127, 92, 211, 238, 234, 235, 116, 53, 202, 130, 32, 180, 138, 131, 232, 230, 87, 38, 175, 0, 44, 67, 51, 35, 73, 220, 187, 151, 186, 22, 49, 139, 148, 94, 71, 69, 3, 24, 79, 195, 95, 218, 15, 200, 150, 244, 128, 91, 39, 88, 173, 46, 165, 228, 194, 199, 36, 221, 77, 14, 31, 1, 125, 41, 152, 193, 7, 61, 63, 254, 223, 154, 189, 159, 210, 231, 144, 47, 250, 112 },
{ 115, 55, 161, 107, 19, 59, 250, 93, 37, 31, 94, 42, 154, 87, 232, 243, 123, 244, 136, 34, 105, 43, 78, 165, 64, 75, 100, 140, 30, 7, 109, 69, 224, 168, 195, 135, 236, 174, 246, 210, 52, 145, 95, 157, 119, 110, 204, 197, 207, 211, 238, 192, 249, 237, 213, 62, 89, 181, 217, 146, 158, 39, 104, 68, 81, 70, 202, 90, 169, 159, 172, 80, 227, 57, 223, 255, 186, 1, 101, 44, 193, 98, 36, 54, 144, 24, 61, 96, 82, 88, 139, 183, 245, 184, 2, 216, 58, 177, 130, 120 },
{ 196, 90, 73, 97, 177, 17, 216, 112, 88, 48, 27, 184, 119, 41, 254, 182, 29, 244, 28, 136, 118, 222, 89, 120, 213, 246, 253, 167, 190, 101, 43, 243, 130, 51, 68, 61, 192, 170, 19, 84, 189, 127, 47, 181, 229, 132, 55, 252, 52, 235, 150, 36, 67, 147, 164, 114, 6, 74, 121, 25, 71, 54, 228, 211, 66, 146, 4, 185, 11, 168, 35, 2, 205, 70, 203, 7, 126, 178, 78, 172, 141, 131, 180, 206, 72, 245, 86, 116, 46, 1, 144, 145, 106, 225, 76, 165, 215, 156, 91, 239 },
{ 26, 219, 54, 140, 31, 17, 114, 58, 220, 222, 93, 238, 254, 60, 102, 115, 226, 236, 185, 53, 192, 245, 112, 244, 101, 247, 13, 171, 32, 160, 121, 174, 197, 87, 193, 45, 255, 77, 156, 78, 214, 126, 144, 148, 175, 196, 215, 252, 216, 40, 184, 163, 66, 218, 251, 168, 21, 199, 118, 23, 82, 3, 37, 104, 145, 154, 179, 46, 165, 0, 85, 237, 195, 106, 100, 176, 9, 38, 187, 42, 35, 5, 213, 68, 88, 56, 84, 183, 134, 34, 15, 227, 191, 130, 12, 69, 240, 72, 55, 108 },
{ 99, 51, 145, 147, 229, 119, 188, 204, 49, 160, 168, 97, 118, 23, 130, 22, 114, 30, 15, 42, 9, 74, 186, 57, 245, 140, 116, 177, 136, 81, 87, 50, 88, 173, 159, 228, 174, 222, 53, 94, 163, 83, 241, 139, 121, 75, 25, 150, 73, 137, 233, 107, 158, 132, 240, 227, 251, 180, 3, 209, 206, 242, 216, 244, 109, 89, 135, 117, 112, 185, 33, 190, 176, 214, 7, 1, 35, 67, 129, 54, 189, 201, 217, 223, 179, 120, 154, 208, 105, 232, 62, 90, 19, 252, 191, 77, 10, 205, 225, 181 },
{ 192, 1, 218, 13, 73, 72, 66, 231, 255, 224, 70, 4, 195, 97, 53, 108, 112, 77, 194, 42, 138, 12, 191, 71, 225, 181, 196, 102, 9, 2, 163, 230, 56, 217, 99, 87, 227, 107, 174, 90, 61, 166, 153, 142, 91, 106, 183, 236, 253, 44, 36, 157, 175, 41, 3, 121, 250, 50, 10, 204, 86, 170, 245, 132, 96, 27, 152, 209, 219, 34, 78, 64, 156, 141, 28, 164, 39, 30, 226, 134, 147, 129, 40, 161, 244, 252, 68, 187, 201, 169, 229, 81, 76, 159, 58, 172, 52, 32, 43, 35 },
{ 128, 253, 105, 166, 24, 135, 200, 252, 159, 34, 231, 230, 44, 205, 111, 160, 228, 226, 184, 221, 27, 213, 202, 19, 8, 132, 171, 212, 152, 23, 25, 18, 20, 247, 50, 82, 251, 62, 86, 124, 236, 48, 99, 74, 129, 173, 144, 4, 179, 101, 67, 137, 234, 92, 116, 29, 118, 46, 170, 161, 193, 66, 157, 43, 224, 102, 106, 94, 175, 198, 96, 5, 28, 219, 1, 223, 145, 151, 121, 71, 185, 57, 90, 164, 38, 199, 216, 218, 222, 188, 9, 15, 79, 65, 100, 31, 155, 32, 108, 21 },
{ 238, 79, 135, 55, 250, 160, 21, 202, 224, 51, 139, 92, 140, 111, 234, 205, 68, 200, 115, 197, 130, 170, 89, 35, 84, 239, 113, 157, 54, 153, 226, 91, 99, 180, 133, 175, 17, 103, 13, 29, 1, 129, 42, 228, 32, 66, 50, 240, 206, 143, 19, 15, 131, 104, 196, 75, 25, 230, 209, 142, 210, 220, 145, 30, 57, 246, 178, 39, 182, 10, 98, 85, 154, 107, 77, 56, 144, 179, 61, 109, 156, 27, 100, 71, 110, 231, 198, 136, 38, 44, 177, 229, 45, 161, 195, 60, 102, 33, 95, 36 },
{ 125, 133, 30, 228, 232, 66, 31, 82, 196, 249, 242, 103, 3, 211, 42, 152, 10, 90, 247, 146, 245, 158, 39, 151, 134, 159, 176, 208, 165, 32, 116, 84, 0, 206, 98, 12, 91, 130, 119, 57, 224, 144, 150, 56, 251, 80, 109, 14, 43, 118, 252, 33, 233, 213, 148, 81, 207, 189, 156, 230, 236, 136, 145, 117, 62, 89, 8, 238, 226, 219, 76, 174, 220, 44, 183, 202, 94, 199, 68, 13, 28, 157, 178, 185, 131, 79, 41, 6, 227, 210, 129, 172, 17, 167, 52, 100, 248, 177, 85, 1 },
{ 171, 88, 69, 71, 5, 137, 235, 59, 68, 174, 185, 151, 168, 16, 231, 144, 241, 113, 38, 230, 244, 165, 80, 253, 20, 156, 125, 107, 106, 245, 211, 146, 210, 212, 22, 37, 200, 62, 150, 209, 110, 169, 164, 124, 53, 51, 254, 41, 11, 94, 50, 197, 74, 223, 255, 138, 35, 109, 145, 189, 195, 242, 175, 208, 198, 251, 64, 27, 161, 12, 7, 204, 123, 142, 214, 108, 128, 40, 134, 194, 120, 243, 148, 188, 33, 77, 15, 224, 79, 179, 65, 158, 54, 219, 217, 180, 173, 105, 205, 18 },
{ 221, 60, 236, 11, 36, 92, 17, 119, 32, 81, 122, 253, 202, 76, 13, 206, 0, 252, 235, 229, 126, 70, 214, 234, 12, 105, 97, 218, 22, 96, 46, 226, 121, 37, 145, 167, 220, 193, 178, 25, 187, 3, 21, 49, 185, 180, 231, 195, 165, 94, 243, 107, 10, 210, 239, 39, 115, 159, 79, 15, 24, 4, 71, 139, 147, 204, 28, 188, 201, 251, 158, 175, 7, 157, 192, 240, 52, 199, 58, 62, 249, 223, 100, 217, 113, 196, 212, 33, 59, 35, 137, 112, 153, 176, 224, 216, 244, 87, 174, 73 },
{ 70, 72, 119, 63, 145, 13, 47, 66, 69, 77, 237, 213, 184, 241, 112, 64, 242, 56, 234, 85, 122, 103, 211, 216, 155, 22, 111, 186, 129, 208, 193, 204, 133, 136, 148, 75, 40, 127, 137, 6, 65, 104, 100, 162, 3, 93, 158, 110, 143, 96, 12, 217, 180, 179, 255, 151, 243, 29, 38, 88, 139, 253, 152, 131, 182, 54, 149, 177, 209, 102, 232, 79, 154, 250, 132, 24, 26, 244, 176, 138, 201, 123, 53, 91, 142, 135, 121, 225, 43, 62, 76, 19, 195, 14, 187, 171, 229, 74, 254, 196 },
{ 46, 209, 150, 199, 164, 38, 222, 5, 212, 4, 167, 224, 195, 16, 75, 29, 225, 182, 153, 89, 194, 191, 48, 122, 118, 111, 186, 112, 239, 90, 120, 163, 36, 116, 147, 217, 87, 181, 23, 141, 241, 145, 47, 6, 252, 117, 245, 54, 12, 40, 178, 45, 85, 220, 160, 130, 143, 73, 236, 172, 96, 80, 106, 131, 110, 221, 144, 185, 26, 244, 213, 52, 228, 174, 33, 251, 99, 30, 170, 226, 157, 19, 27, 114, 161, 0, 3, 203, 123, 230, 234, 13, 255, 24, 113, 204, 39, 66, 43, 140 },
{ 82, 56, 141, 237, 76, 151, 41, 1, 51, 149, 104, 204, 33, 5, 43, 219, 17, 221, 36, 20, 71, 180, 184, 97, 6, 202, 60, 195, 192, 230, 171, 242, 193, 144, 188, 199, 244, 110, 64, 248, 22, 234, 30, 119, 84, 117, 120, 209, 66, 21, 53, 176, 249, 75, 58, 153, 178, 3, 239, 227, 27, 159, 206, 81, 175, 93, 37, 42, 136, 185, 132, 146, 122, 160, 47, 123, 96, 61, 130, 34, 50, 161, 154, 11, 255, 0, 246, 72, 78, 54, 134, 145, 194, 89, 229, 150, 88, 210, 99, 86 },
{ 162, 154, 85, 140, 155, 12, 146, 9, 71, 82, 168, 107, 67, 6, 123, 226, 180, 93, 249, 83, 95, 190, 3, 252, 130, 206, 59, 55, 193, 183, 50, 233, 77, 58, 239, 156, 73, 174, 136, 161, 81, 43, 122, 26, 216, 21, 49, 133, 158, 36, 11, 176, 28, 104, 163, 129, 29, 91, 247, 145, 108, 75, 152, 113, 186, 124, 92, 244, 237, 57, 125, 119, 0, 246, 69, 166, 224, 65, 41, 118, 198, 64, 221, 14, 214, 217, 51, 112, 116, 70, 194, 192, 255, 207, 10, 19, 134, 230, 4, 173 },
{ 54, 237, 68, 88, 122, 192, 35, 65, 229, 170, 113, 111, 39, 168, 211, 129, 26, 7, 1, 125, 97, 202, 182, 213, 107, 120, 164, 132, 64, 14, 20, 21, 255, 40, 121, 126, 140, 180, 177, 127, 147, 152, 25, 155, 183, 158, 212, 178, 33, 175, 13, 96, 220, 225, 187, 117, 217, 191, 53, 233, 207, 16, 2, 235, 204, 208, 32, 56, 161, 24, 137, 0, 45, 87, 116, 138, 100, 109, 150, 130, 245, 12, 105, 34, 85, 185, 93, 189, 167, 131, 3, 146, 52, 128, 228, 73, 218, 95, 136, 115 },
{ 110, 39, 98, 173, 65, 240, 2, 55, 107, 185, 153, 238, 24, 120, 249, 206, 242, 127, 247, 177, 64, 176, 129, 202, 191, 140, 207, 121, 193, 74, 147, 135, 89, 167, 44, 93, 12, 15, 164, 216, 99, 3, 117, 11, 23, 124, 62, 251, 197, 250, 236, 94, 196, 63, 71, 81, 172, 241, 228, 43, 203, 32, 168, 218, 154, 132, 115, 20, 255, 144, 230, 137, 205, 183, 163, 131, 253, 219, 235, 180, 7, 53, 220, 198, 148, 10, 215, 171, 212, 84, 151, 227, 157, 152, 95, 139, 195, 108, 18, 224 },
{ 142, 83, 35, 39, 125, 90, 45, 68, 29, 19, 61, 8, 184, 173, 100, 13, 18, 62, 212, 203, 52, 181, 67, 171, 127, 49, 55, 109, 214, 224, 85, 27, 202, 189, 228, 57, 226, 92, 235, 99, 218, 227, 245, 74, 4, 191, 79, 219, 149, 103, 42, 175, 148, 14, 152, 252, 178, 156, 51, 120, 253, 114, 21, 139, 124, 122, 48, 89, 210, 117, 223, 160, 167, 244, 205, 211, 216, 140, 241, 172, 0, 217, 248, 54, 30, 146, 153, 131, 161, 230, 91, 135, 240, 10, 197, 157, 225, 44, 70, 43 },
{ 182, 180, 197, 93, 43, 204, 8, 116, 234, 157, 14, 136, 168, 254, 165, 63, 154, 247, 20, 149, 139, 83, 94, 134, 140, 223, 231, 253, 220, 215, 86, 184, 251, 50, 45, 57, 33, 207, 79, 1, 105, 74, 67, 233, 179, 99, 25, 218, 24, 111, 34, 132, 72, 208, 209, 121, 32, 230, 191, 120, 31, 55, 12, 226, 246, 175, 141, 47, 114, 128, 39, 144, 127, 199, 19, 23, 161, 194, 131, 159, 187, 122, 84, 90, 133, 98, 42, 13, 28, 92, 26, 60, 135, 172, 224, 225, 227, 95, 129, 113 },
{ 151, 68, 90, 227, 211, 21, 223, 184, 46, 237, 221, 82, 186, 48, 27, 17, 95, 24, 43, 86, 173, 178, 246, 5, 91, 99, 109, 245, 251, 153, 31, 47, 191, 34, 72, 214, 229, 49, 53, 176, 204, 52, 180, 45, 242, 70, 75, 50, 234, 167, 145, 241, 140, 62, 219, 76, 30, 15, 110, 124, 116, 206, 224, 205, 132, 108, 19, 253, 240, 197, 38, 166, 248, 14, 160, 26, 131, 63, 128, 199, 16, 71, 11, 28, 208, 104, 185, 201, 32, 162, 243, 146, 103, 77, 107, 0, 88, 231, 117, 230 },
{ 78, 185, 212, 198, 3, 147, 144, 56, 66, 90, 238, 158, 10, 227, 73, 47, 55, 53, 207, 230, 89, 165, 217, 46, 128, 54, 219, 245, 164, 107, 248, 249, 199, 127, 203, 172, 103, 126, 131, 95, 112, 173, 186, 87, 113, 44, 41, 153, 187, 18, 134, 35, 175, 1, 142, 43, 23, 98, 67, 117, 237, 27, 40, 196, 71, 51, 64, 171, 92, 232, 160, 109, 235, 77, 69, 221, 255, 25, 241, 218, 4, 99, 180, 148, 60, 220, 8, 254, 223, 140, 111, 136, 91, 169, 81, 197, 231, 57, 30, 204 },
{ 134, 30, 107, 53, 45, 19, 2, 208, 181, 67, 192, 104, 166, 214, 10, 212, 105, 162, 84, 137, 6, 131, 245, 111, 38, 204, 194, 146, 199, 226, 3, 95, 252, 23, 178, 129, 168, 228, 109, 35, 254, 230, 16, 63, 196, 155, 14, 18, 135, 240, 115, 144, 136, 238, 33, 153, 51, 255, 102, 25, 116, 121, 154, 86, 151, 69, 227, 236, 41, 232, 59, 247, 253, 219, 234, 179, 90, 26, 85, 195, 140, 12, 165, 150, 29, 183, 205, 122, 187, 114, 31, 243, 250, 163, 68, 170, 225, 66, 60, 99 },
{ 88, 40, 203, 63, 114, 216, 109, 250, 117, 94, 245, 209, 47, 236, 101, 98, 62, 41, 35, 52, 113, 27, 49, 84, 50, 249, 238, 69, 70, 44, 184, 97, 252, 10, 173, 39, 33, 147, 54, 102, 25, 200, 3, 139, 24, 64, 237, 222, 78, 136, 153, 81, 60, 103, 138, 241, 17, 77, 135, 29, 7, 125, 76, 12, 181, 226, 95, 65, 174, 217, 74, 37, 104, 148, 89, 59, 150, 42, 13, 235, 234, 99, 247, 196, 243, 121, 154, 231, 36, 93, 92, 176, 210, 66, 85, 229, 46, 111, 214, 75 },
{ 166, 96, 209, 239, 1, 191, 217, 251, 35, 163, 59, 116, 206, 254, 205, 148, 68, 147, 242, 143, 69, 244, 141, 22, 82, 52, 121, 118, 81, 45, 28, 2, 36, 162, 91, 90, 34, 253, 38, 131, 223, 133, 216, 207, 119, 102, 72, 225, 13, 149, 161, 39, 8, 249, 184, 114, 44, 132, 180, 227, 88, 57, 89, 109, 10, 155, 33, 201, 94, 137, 74, 20, 60, 177, 198, 220, 48, 168, 0, 212, 196, 160, 189, 150, 15, 43, 136, 4, 110, 250, 128, 230, 92, 86, 228, 12, 93, 97, 75, 197 },
{ 36, 194, 255, 225, 131, 39, 34, 1, 228, 192, 14, 149, 50, 180, 140, 148, 186, 171, 248, 244, 105, 42, 98, 95, 3, 114, 99, 124, 12, 66, 107, 43, 137, 216, 5, 218, 56, 103, 198, 109, 199, 185, 133, 190, 252, 32, 224, 52, 161, 121, 41, 213, 232, 47, 37, 227, 146, 69, 102, 196, 46, 40, 238, 178, 207, 61, 0, 147, 129, 9, 97, 251, 184, 126, 189, 67, 144, 209, 73, 93, 127, 239, 17, 54, 19, 96, 155, 85, 88, 240, 167, 182, 80, 72, 106, 71, 203, 112, 234, 177 },
{ 82, 148, 169, 138, 79, 230, 88, 181, 46, 206, 3, 163, 19, 149, 67, 197, 139, 2, 30, 188, 98, 195, 246, 17, 99, 44, 5, 76, 251, 135, 214, 8, 65, 10, 34, 53, 146, 29, 37, 106, 11, 218, 249, 48, 63, 118, 176, 175, 219, 231, 157, 75, 201, 172, 145, 84, 7, 154, 178, 186, 89, 165, 49, 144, 66, 187, 182, 221, 23, 180, 253, 220, 202, 112, 216, 72, 244, 200, 247, 228, 105, 173, 9, 52, 45, 151, 147, 96, 71, 199, 87, 97, 143, 236, 56, 6, 102, 229, 81, 194 },
{ 79, 115, 139, 50, 65, 44, 116, 14, 49, 90, 67, 209, 224, 2, 59, 104, 69, 199, 206, 119, 136, 212, 21, 211, 190, 164, 31, 137, 55, 204, 177, 186, 197, 195, 86, 74, 92, 33, 85, 239, 118, 158, 196, 237, 36, 57, 220, 11, 149, 62, 30, 169, 180, 188, 82, 28, 181, 95, 189, 251, 122, 100, 253, 191, 94, 120, 89, 106, 15, 179, 138, 200, 227, 159, 39, 155, 77, 34, 63, 29, 1, 223, 175, 96, 216, 245, 46, 142, 140, 153, 201, 8, 124, 194, 35, 20, 152, 87, 109, 187 },
{ 165, 45, 144, 43, 145, 174, 207, 92, 137, 161, 196, 203, 79, 182, 160, 113, 30, 140, 110, 236, 117, 179, 173, 50, 49, 139, 199, 70, 123, 175, 130, 163, 82, 183, 138, 16, 168, 176, 237, 55, 46, 5, 212, 189, 128, 220, 11, 35, 116, 9, 27, 230, 248, 91, 17, 36, 114, 221, 6, 171, 104, 208, 206, 131, 8, 214, 111, 213, 141, 125, 219, 146, 14, 227, 75, 194, 102, 136, 101, 172, 157, 251, 33, 85, 135, 64, 167, 222, 119, 159, 37, 122, 3, 255, 71, 41, 240, 204, 201, 93 },
{ 194, 192, 102, 86, 38, 58, 200, 97, 15, 227, 158, 146, 105, 210, 142, 159, 197, 74, 251, 220, 93, 184, 189, 9, 213, 6, 162, 113, 88, 41, 229, 173, 183, 94, 63, 168, 22, 123, 177, 163, 195, 81, 80, 250, 53, 150, 56, 101, 76, 156, 35, 65, 239, 219, 124, 242, 133, 98, 70, 190, 0, 134, 175, 236, 241, 185, 186, 244, 87, 57, 117, 127, 180, 228, 246, 216, 165, 223, 60, 36, 95, 233, 125, 75, 252, 255, 182, 148, 91, 39, 13, 92, 161, 109, 170, 33, 137, 37, 208, 129 },
{ 50, 179, 188, 200, 124, 199, 231, 223, 12, 158, 1, 194, 135, 96, 67, 198, 27, 70, 90, 9, 112, 146, 209, 241, 159, 56, 54, 165, 98, 214, 148, 103, 207, 111, 134, 100, 252, 141, 18, 16, 34, 93, 104, 166, 212, 79, 66, 129, 84, 175, 40, 201, 23, 0, 229, 59, 147, 20, 184, 215, 143, 232, 72, 130, 245, 244, 178, 249, 123, 5, 28, 47, 180, 39, 248, 109, 197, 239, 250, 52, 60, 11, 189, 196, 36, 225, 89, 133, 222, 69, 125, 43, 80, 162, 203, 193, 169, 32, 220, 71 },
{ 148, 110, 20, 113, 132, 193, 253, 212, 35, 125, 5, 28, 160, 105, 23, 31, 136, 235, 205, 100, 101, 15, 155, 146, 71, 237, 195, 154, 254, 45, 33, 8, 115, 94, 206, 224, 37, 139, 43, 65, 106, 72, 186, 129, 240, 211, 178, 172, 142, 32, 135, 143, 150, 57, 243, 48, 21, 167, 163, 66, 119, 61, 234, 182, 22, 228, 238, 74, 7, 73, 169, 60, 131, 77, 59, 50, 2, 83, 196, 181, 52, 10, 104, 90, 192, 44, 244, 26, 221, 126, 213, 229, 114, 174, 161, 171, 103, 145, 187, 112 },
{ 14, 49, 176, 79, 92, 63, 168, 45, 123, 252, 37, 82, 87, 208, 218, 238, 101, 185, 27, 85, 2, 135, 95, 233, 69, 84, 88, 166, 13, 57, 196, 165, 241, 100, 245, 39, 7, 236, 174, 127, 58, 213, 144, 219, 239, 177, 70, 224, 164, 42, 72, 31, 134, 43, 225, 187, 180, 244, 51, 133, 231, 145, 78, 206, 131, 136, 157, 115, 222, 106, 59, 179, 34, 15, 154, 178, 117, 151, 207, 33, 122, 83, 29, 67, 193, 126, 128, 76, 158, 223, 47, 5, 44, 129, 199, 250, 163, 242, 25, 161 },
{ 229, 70, 175, 88, 185, 123, 251, 44, 221, 45, 13, 239, 213, 115, 159, 219, 72, 50, 135, 181, 173, 171, 37, 228, 14, 19, 146, 210, 232, 216, 12, 147, 114, 113, 220, 253, 211, 46, 188, 134, 39, 98, 183, 249, 23, 194, 79, 236, 33, 84, 158, 245, 151, 2, 89, 242, 58, 118, 198, 157, 92, 91, 168, 67, 129, 222, 100, 99, 102, 40, 180, 248, 7, 34, 196, 95, 130, 212, 246, 192, 69, 237, 43, 57, 97, 20, 205, 104, 103, 201, 65, 1, 117, 5, 112, 52, 225, 55, 127, 109 },
{ 92, 54, 124, 25, 99, 128, 228, 90, 237, 153, 213, 216, 43, 210, 56, 15, 60, 102, 100, 108, 97, 83, 36, 251, 70, 115, 93, 158, 212, 243, 143, 130, 168, 184, 29, 173, 126, 125, 34, 50, 110, 2, 211, 204, 225, 28, 32, 176, 194, 38, 4, 61, 48, 166, 208, 18, 192, 5, 220, 186, 231, 209, 218, 226, 104, 105, 191, 240, 116, 178, 30, 229, 246, 101, 195, 207, 112, 19, 164, 134, 44, 224, 201, 232, 219, 23, 157, 16, 154, 80, 22, 179, 235, 247, 146, 196, 248, 203, 252, 200 },
{ 41, 98, 8, 27, 111, 89, 223, 52, 54, 230, 225, 236, 28, 73, 211, 71, 33, 61, 140, 133, 208, 83, 153, 20, 65, 254, 175, 189, 24, 62, 92, 13, 224, 161, 55, 85, 199, 107, 206, 218, 109, 117, 194, 25, 70, 241, 80, 122, 68, 101, 64, 156, 29, 44, 145, 74, 205, 131, 86, 82, 37, 26, 169, 5, 90, 134, 255, 138, 23, 238, 137, 197, 193, 167, 42, 210, 180, 235, 115, 239, 179, 16, 87, 231, 76, 39, 216, 181, 253, 163, 57, 178, 88, 45, 6, 240, 196, 48, 130, 18 },
{ 169, 0, 220, 211, 219, 20, 97, 136, 68, 33, 230, 177, 125, 252, 51, 188, 192, 195, 203, 114, 209, 147, 199, 197, 170, 198, 110, 143, 88, 31, 71, 193, 160, 205, 174, 7, 118, 67, 168, 105, 99, 175, 244, 144, 5, 243, 121, 103, 1, 182, 229, 155, 113, 111, 140, 37, 152, 77, 202, 255, 129, 207, 132, 167, 108, 127, 54, 115, 74, 173, 126, 234, 224, 50, 63, 95, 12, 150, 48, 91, 163, 59, 70, 47, 241, 216, 235, 17, 248, 21, 109, 42, 176, 117, 76, 185, 166, 62, 120, 190 },
{ 203, 73, 6, 231, 221, 107, 29, 254, 149, 2, 58, 236, 250, 57, 196, 25, 96, 103, 190, 14, 99, 23, 169, 17, 32, 162, 225, 180, 106, 144, 34, 87, 226, 188, 218, 110, 69, 204, 214, 127, 229, 145, 52, 64, 11, 161, 1, 126, 209, 245, 246, 232, 164, 117, 207, 150, 71, 65, 48, 142, 224, 174, 50, 153, 157, 85, 251, 136, 212, 67, 146, 210, 143, 205, 109, 77, 89, 230, 118, 5, 217, 189, 66, 102, 116, 252, 235, 113, 201, 108, 173, 234, 8, 137, 94, 15, 193, 42, 192, 197 },
{ 3, 153, 52, 185, 238, 14, 25, 77, 79, 112, 243, 178, 121, 236, 195, 30, 210, 71, 166, 212, 72, 54, 63, 186, 140, 81, 48, 250, 230, 194, 144, 61, 244, 241, 50, 214, 40, 68, 221, 183, 17, 182, 143, 118, 208, 119, 67, 107, 249, 173, 18, 217, 126, 92, 189, 34, 239, 117, 96, 160, 64, 20, 59, 161, 191, 133, 205, 43, 51, 165, 220, 216, 37, 207, 227, 222, 110, 13, 226, 152, 99, 170, 255, 247, 141, 176, 174, 111, 192, 109, 148, 58, 49, 224, 199, 46, 193, 122, 130, 89 },
{ 112, 221, 252, 13, 37, 90, 206, 144, 158, 43, 167, 81, 216, 114, 184, 182, 136, 34, 232, 163, 124, 35, 234, 207, 25, 44, 170, 73, 77, 67, 139, 1, 88, 111, 71, 21, 186, 38, 110, 197, 138, 126, 9, 212, 98, 244, 85, 27, 79, 82, 22, 28, 115, 121, 205, 119, 236, 198, 233, 113, 150, 162, 211, 87, 96, 173, 83, 70, 117, 109, 237, 188, 53, 152, 242, 154, 61, 149, 223, 190, 106, 48, 200, 254, 17, 225, 239, 175, 97, 24, 142, 147, 108, 10, 7, 92, 12, 194, 208, 222 },
{ 153, 57, 242, 173, 190, 232, 103, 198, 202, 211, 107, 226, 255, 124, 38, 130, 95, 141, 194, 196, 19, 60, 99, 216, 15, 67, 204, 229, 118, 243, 116, 2, 148, 28, 114, 55, 72, 217, 215, 58, 213, 40, 161, 101, 96, 71, 249, 154, 45, 3, 105, 46, 37, 181, 11, 35, 240, 224, 82, 80, 131, 108, 178, 220, 238, 111, 225, 168, 156, 86, 97, 49, 163, 27, 182, 31, 246, 17, 218, 85, 234, 104, 59, 241, 93, 87, 142, 247, 10, 175, 61, 149, 195, 75, 21, 166, 44, 252, 200, 231 },
{ 17, 73, 88, 170, 247, 21, 79, 226, 23, 40, 161, 13, 34, 62, 108, 146, 134, 46, 242, 51, 138, 214, 149, 53, 118, 95, 130, 103, 157, 102, 86, 168, 101, 190, 90, 158, 169, 107, 206, 70, 186, 241, 148, 129, 238, 1, 63, 84, 67, 58, 74, 167, 209, 76, 233, 229, 160, 255, 96, 139, 188, 194, 249, 197, 83, 4, 115, 87, 105, 201, 61, 85, 6, 133, 145, 219, 171, 8, 3, 56, 94, 10, 64, 191, 183, 178, 159, 195, 155, 228, 55, 251, 99, 192, 49, 97, 43, 117, 92, 135 },
{ 123, 82, 128, 130, 86, 248, 92, 254, 20, 52, 171, 112, 143, 36, 114, 239, 159, 180, 198, 191, 33, 175, 187, 124, 168, 170, 23, 7, 70, 182, 253, 166, 53, 93, 210, 218, 201, 29, 173, 13, 228, 217, 121, 75, 153, 91, 225, 122, 145, 241, 108, 104, 11, 31, 68, 224, 81, 37, 189, 199, 165, 96, 179, 80, 151, 216, 56, 148, 72, 73, 211, 66, 45, 27, 238, 54, 188, 109, 255, 149, 247, 106, 152, 125, 44, 5, 76, 26, 40, 208, 185, 164, 14, 150, 117, 215, 62, 230, 236, 226 },
{ 153, 242, 154, 54, 111, 28, 80, 61, 68, 175, 87, 45, 22, 133, 31, 76, 207, 163, 14, 159, 174, 50, 30, 15, 234, 100, 132, 255, 20, 1, 86, 246, 92, 123, 192, 49, 43, 71, 51, 219, 6, 59, 226, 185, 53, 74, 44, 125, 145, 81, 13, 206, 64, 130, 110, 24, 240, 201, 115, 158, 118, 161, 228, 157, 19, 162, 16, 211, 148, 191, 66, 194, 101, 26, 199, 129, 227, 41, 127, 104, 108, 231, 120, 137, 179, 241, 106, 37, 152, 32, 97, 79, 200, 243, 122, 178, 39, 254, 63, 165 },
{ 95, 161, 134, 14, 192, 188, 34, 1, 97, 144, 51, 133, 154, 111, 204, 180, 47, 153, 3, 82, 57, 152, 61, 121, 38, 19, 106, 65, 93, 230, 234, 109, 70, 243, 226, 251, 13, 222, 148, 90, 237, 135, 232, 31, 157, 221, 151, 199, 227, 0, 246, 21, 168, 32, 162, 213, 4, 55, 255, 113, 43, 115, 44, 136, 203, 86, 175, 53, 202, 50, 20, 33, 68, 129, 78, 145, 103, 89, 215, 37, 79, 77, 8, 229, 206, 100, 138, 186, 72, 64, 7, 60, 22, 176, 214, 208, 141, 142, 195, 83 },
{ 8, 102, 85, 18, 33, 190, 199, 146, 65, 213, 174, 87, 128, 222, 54, 47, 236, 71, 144, 138, 172, 160, 147, 92, 186, 130, 16, 24, 0, 10, 112, 229, 232, 208, 237, 41, 225, 70, 101, 129, 133, 86, 100, 200, 77, 149, 82, 224, 63, 61, 36, 114, 170, 14, 145, 226, 132, 123, 179, 53, 134, 90, 78, 91, 207, 148, 95, 80, 189, 245, 178, 227, 197, 150, 233, 105, 159, 205, 30, 252, 250, 231, 141, 247, 81, 218, 13, 182, 57, 55, 28, 239, 67, 60, 117, 88, 125, 46, 12, 94 },
{ 43, 109, 106, 18, 153, 17, 162, 203, 101, 113, 29, 100, 0, 185, 200, 32, 175, 10, 219, 57, 224, 194, 171, 160, 158, 116, 36, 110, 207, 251, 137, 227, 135, 126, 54, 236, 205, 81, 42, 141, 3, 232, 69, 104, 241, 117, 73, 234, 84, 140, 77, 134, 226, 60, 21, 114, 195, 93, 229, 184, 242, 11, 173, 14, 41, 142, 209, 136, 249, 85, 91, 248, 97, 181, 239, 26, 197, 102, 215, 99, 165, 228, 44, 53, 96, 4, 243, 198, 211, 179, 58, 94, 217, 2, 178, 9, 121, 83, 128, 112 },
{ 61, 50, 124, 131, 85, 248, 196, 128, 212, 24, 37, 87, 172, 169, 14, 223, 81, 19, 228, 252, 127, 183, 250, 249, 78, 208, 118, 7, 98, 242, 188, 186, 147, 171, 89, 13, 145, 123, 32, 65, 213, 246, 68, 9, 139, 34, 106, 41, 206, 187, 160, 133, 129, 167, 191, 244, 113, 173, 179, 243, 18, 203, 64, 159, 104, 55, 151, 165, 198, 253, 156, 241, 42, 218, 149, 114, 247, 234, 108, 126, 235, 119, 72, 70, 6, 29, 21, 107, 75, 138, 112, 15, 4, 161, 66, 238, 26, 189, 40, 100 },
{ 11, 174, 12, 113, 222, 194, 55, 133, 232, 56, 69, 169, 131, 205, 98, 66, 163, 177, 72, 187, 85, 139, 218, 122, 41, 112, 198, 237, 159, 150, 104, 184, 16, 231, 213, 25, 141, 78, 255, 145, 110, 97, 70, 195, 47, 68, 221, 157, 45, 137, 26, 102, 99, 223, 203, 126, 183, 250, 118, 59, 164, 33, 229, 101, 149, 182, 192, 22, 1, 170, 158, 58, 180, 116, 48, 123, 155, 64, 18, 188, 74, 86, 211, 24, 77, 27, 61, 143, 7, 79, 201, 165, 54, 117, 209, 247, 36, 144, 241, 88 },
{ 178, 205, 73, 37, 29, 129, 217, 14, 114, 202, 180, 238, 236, 127, 35, 54, 152, 30, 153, 67, 196, 206, 187, 168, 80, 60, 27, 201, 193, 156, 15, 204, 39, 105, 253, 99, 95, 210, 240, 94, 10, 163, 87, 28, 70, 197, 85, 182, 191, 66, 220, 46, 52, 96, 147, 243, 242, 254, 124, 213, 207, 13, 44, 134, 65, 113, 31, 48, 20, 88, 64, 230, 223, 71, 172, 100, 171, 121, 78, 138, 77, 101, 8, 75, 216, 24, 158, 3, 218, 43, 26, 61, 97, 151, 120, 1, 175, 56, 161, 115 },
{ 74, 100, 24, 102, 156, 222, 111, 215, 175, 109, 32, 57, 43, 124, 169, 19, 85, 233, 207, 164, 136, 238, 158, 17, 29, 197, 179, 146, 129, 89, 83, 40, 54, 26, 142, 213, 193, 88, 11, 190, 21, 133, 227, 51, 231, 181, 168, 113, 202, 211, 58, 250, 167, 148, 35, 7, 94, 160, 161, 198, 234, 122, 154, 188, 23, 69, 209, 241, 67, 247, 123, 184, 68, 118, 62, 170, 144, 59, 192, 201, 0, 93, 244, 191, 232, 252, 6, 155, 174, 9, 36, 176, 50, 41, 55, 243, 214, 14, 86, 126 },
{ 49, 17, 179, 13, 186, 211, 99, 81, 60, 11, 71, 84, 10, 128, 140, 103, 66, 134, 3, 21, 220, 165, 52, 144, 107, 250, 201, 106, 231, 145, 92, 177, 167, 252, 34, 149, 36, 195, 42, 146, 5, 247, 67, 120, 46, 26, 44, 206, 243, 69, 38, 160, 157, 37, 89, 221, 68, 182, 19, 118, 223, 94, 9, 70, 232, 189, 110, 97, 104, 190, 100, 119, 79, 124, 40, 246, 175, 56, 137, 126, 171, 63, 20, 196, 228, 241, 224, 0, 244, 164, 233, 240, 193, 138, 131, 237, 183, 90, 239, 33 },
{ 240, 126, 161, 231, 101, 160, 164, 214, 202, 97, 201, 157, 129, 132, 250, 107, 88, 232, 146, 209, 246, 251, 200, 110, 154, 103, 112, 62, 44, 65, 230, 78, 138, 36, 33, 120, 167, 222, 37, 227, 81, 90, 252, 139, 77, 218, 184, 192, 86, 134, 70, 173, 204, 168, 221, 80, 27, 241, 89, 166, 67, 233, 150, 7, 32, 40, 105, 56, 225, 71, 21, 8, 6, 223, 187, 151, 127, 87, 24, 189, 206, 85, 185, 14, 174, 61, 190, 193, 66, 100, 38, 22, 106, 31, 235, 79, 25, 69, 254, 163 },
{ 229, 210, 63, 11, 93, 173, 56, 231, 18, 131, 150, 171, 12, 65, 202, 249, 166, 242, 92, 79, 8, 255, 159, 26, 91, 219, 237, 61, 223, 82, 14, 233, 201, 47, 117, 146, 115, 136, 142, 248, 252, 51, 37, 86, 21, 3, 49, 211, 88, 185, 181, 119, 4, 78, 153, 163, 193, 198, 90, 221, 213, 118, 226, 169, 105, 180, 215, 184, 189, 207, 225, 110, 57, 32, 155, 204, 60, 69, 23, 206, 209, 46, 42, 66, 45, 182, 137, 247, 75, 74, 232, 52, 135, 133, 126, 48, 138, 40, 162, 24 },
{ 201, 112, 118, 7, 14, 23, 91, 1, 102, 145, 56, 163, 179, 101, 25, 83, 209, 183, 99, 48, 181, 143, 234, 141, 191, 41, 244, 242, 211, 188, 151, 115, 167, 13, 108, 89, 128, 158, 130, 39, 2, 222, 11, 133, 113, 196, 47, 215, 100, 218, 6, 156, 10, 229, 109, 236, 225, 20, 170, 4, 199, 55, 53, 152, 254, 247, 40, 116, 230, 21, 189, 135, 68, 76, 231, 202, 243, 200, 184, 185, 59, 114, 221, 249, 172, 73, 103, 70, 35, 233, 81, 255, 126, 136, 171, 154, 162, 223, 36, 220 },
{ 183, 63, 80, 248, 197, 19, 92, 160, 10, 15, 237, 82, 121, 173, 57, 112, 12, 225, 244, 181, 113, 17, 42, 169, 204, 198, 51, 209, 127, 106, 133, 25, 228, 211, 79, 101, 36, 233, 175, 86, 68, 128, 154, 241, 192, 23, 215, 217, 240, 70, 163, 59, 245, 196, 230, 159, 212, 77, 38, 232, 222, 72, 220, 206, 107, 4, 78, 122, 251, 33, 131, 213, 73, 179, 246, 123, 49, 46, 84, 239, 205, 108, 39, 89, 6, 37, 250, 201, 31, 120, 231, 170, 202, 5, 187, 117, 0, 234, 40, 236 },
{ 251, 58, 46, 166, 235, 78, 68, 211, 123, 219, 100, 25, 202, 247, 70, 135, 244, 80, 1, 56, 112, 233, 159, 30, 250, 0, 243, 84, 6, 29, 177, 128, 169, 53, 102, 201, 81, 60, 47, 238, 154, 93, 213, 42, 57, 74, 110, 150, 72, 127, 214, 18, 186, 126, 228, 167, 103, 172, 104, 185, 226, 17, 20, 158, 241, 39, 26, 132, 22, 33, 83, 217, 51, 161, 97, 200, 253, 249, 16, 59, 76, 120, 43, 64, 119, 31, 73, 140, 178, 108, 225, 2, 98, 156, 130, 106, 163, 152, 195, 63 },
{ 249, 179, 23, 144, 213, 28, 75, 96, 199, 115, 237, 125, 222, 124, 47, 247, 151, 205, 3, 181, 57, 111, 29, 13, 202, 54, 58, 15, 189, 128, 70, 161, 183, 168, 79, 221, 80, 10, 193, 63, 240, 162, 16, 83, 157, 219, 250, 173, 12, 244, 56, 116, 198, 102, 39, 30, 196, 223, 17, 89, 236, 42, 214, 188, 172, 32, 245, 169, 105, 5, 49, 50, 206, 81, 9, 37, 166, 220, 2, 180, 94, 137, 216, 176, 141, 227, 229, 22, 153, 100, 203, 31, 36, 149, 84, 52, 194, 19, 159, 104 }
};
// Mask/Kernel
int h[H_SIZE][H_SIZE] = {
{0, 1, 0},
{1, -4, 1},
{0, 1, 0}
};
// Convolution in each Input point
int A[I_SIZE][I_SIZE];
int y, x; // Loop Counters
// Number of threads
int num_of_threads = -1;
// Get number of threads - to create - from args
if(argc == 2)
num_of_threads = atoi(argv[1]);
// Set number of threads or error
if(num_of_threads > 0) {
// Create threads
omp_set_num_threads(num_of_threads);
}
else {
printf("\nError: ['Create threads' Failed! Cause: Number of threads <= 0]\n\n");
exit(1);
}
// Print message
printf("\n\nHello from %d threads!\n\n", num_of_threads);
#pragma omp parallel for shared(A, I, h) private(y,x)
for(y=0; y<I_SIZE; y++) {
for(x=0; x<I_SIZE; x++) {
// printf("(y,x):(%d,%d) | Thread: %d \n", y, x, omp_get_thread_num() ); // Print y, x and the thread
A[y][x] = convolution(I, h, y, x); // Get convolution value in point y, x
// printf("A[%d][%d]: %d \n\n", y, x, A[y][x] ); // Print convolution value in each point (y,x)
}
}
y = 0;
x = 0;
printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]);
y = 0;
x = 99;
printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]);
y = 61;
x = 83;
printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]);
y = 68;
x = 12;
printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]);
y = 83;
x = 96;
printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]);
printf("\n\n");
if(SAVETOFILE)
save_to_file(A);
// system("PAUSE");
return 0;
} // End main
/* C O N V O L U T I O N */
int convolution(int I[I_SIZE][I_SIZE], int h[H_SIZE][H_SIZE], int y, int x)
{
int k, j; // Loop Counters
int i1, j1, i2, j2; // Helpers (point)
int res1, res2; // Helpers
int result = 0; // Convolution value in point y, x
for(k=-1; k<2; k++) {
for(j=-1; j<2; j++) {
i1 = j+1;
j1 = k+1;
i2 = y-j;
j2 = x-k;
if( ( (i1 >= 0) && (i1 < H_SIZE) ) && ( (j1 >= 0) && (j1 < H_SIZE) ) ) {
res1 = h[i1][j1];
}
else {
res1 = 0;
}
if( ( (i2 >= 0) && (i2 < I_SIZE) ) && ( (j2 >= 0) && (j2 < I_SIZE) ) ) {
res2 = I[i2][j2];
}
else {
res2 = 0;
}
// A += h[j+1][k+1]*I[y-j][x-k];
result += res1*res2;
}
}
if(result < 0) {
result = 0;
}
if(result > 255) {
result = 255;
}
return result;
// Demo code
// return abs(result);
}
/* S A V E L I S T T O F I L E */
void save_to_file(int A[I_SIZE][I_SIZE])
{
FILE *file_conv_res;
int y, x;
file_conv_res = fopen("convolution_z3.txt", "w");
if(file_conv_res == NULL){
printf("Error: ['Save to file' Failed!]");
exit(1);
}
for(y=0; y<I_SIZE; y++) {
for(x=0; x<I_SIZE; x++) {
fprintf(file_conv_res, "A[%d][%d] = %d \n", y, x, A[y][x]);
}
}
fclose(file_conv_res);
return;
} //End of saving to file.
|
utils.h | #ifndef _UTILS_
#define _UTILS_
#include "common.h"
#include "cusparse.h"
void equal_col_partition(int ngpu, int cols, int* counts, int* displs)
{
int colCount = round((double)cols / ngpu);
for(int i = 0; i < ngpu - 1; i++)
{
counts[i] = colCount;
displs[i] = i * colCount;
}
counts[ngpu - 1] = cols - (ngpu - 1) * colCount;
displs[ngpu - 1] = (ngpu - 1) * colCount;
}
void equal_nnz_partition(int ngpu, int ncols, int nnz, const int* cols, int* counts, int* displs)
{
// count nnz for each col.
int* col_elements = (int*)malloc(sizeof(int) * ncols);
for(int i = 0; i < ncols; i++){
col_elements[i] = cols[i + 1] - cols[i];
}
// first find a lower bound nnz for processes
// by applying binary search.
int low = 0, high = nnz;
while(low < high)
{
// the range of searching
int mid = (low + high) / 2;
// calculate how many process are needed so that each
// process receives up to nnz elements
int sum = 0, need = 0;
for(int i = 0; i < ncols; i++){
if (sum + col_elements[i] > mid) {
sum = col_elements[i];
need++;
} else {
sum += col_elements[i];
}
}
// update the searching range.
if (need < ngpu)
high = mid;
else
low = mid + 1;
}
// split cols by processes according to lower bound.
int sum = 0, counter = 0;
for(int i = 0; i < ncols; i++){
if(sum + col_elements[i] > low){
sum = col_elements[i];
counter++;
counts[counter] = 1;
displs[counter] = i;
} else {
sum += col_elements[i];
counts[counter]++;
}
}
}
// print 1D array
template<typename T>
void print_1darray(T *input, int length)
{
for (int i = 0; i < length; i++)
printf("%i, ", input[i]);
printf("\n");
}
/*
__forceinline__ __device__
static double atomicAdd(double *addr, double val)
{
double old = *addr, assumed;
do
{
assumed = old;
old = __longlong_as_double(
atomicCAS((unsigned long long int*)addr,
__double_as_longlong(assumed),
__double_as_longlong(val+assumed)));
}while(assumed != old);
return old;
}*/
template<typename vT>
__forceinline__ __device__
vT sum_32_shfl(vT sum)
{
#pragma unroll
for(int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1)
sum += __shfl_xor(sum, mask);
return sum;
}
/*struct assembly_timer {
timeval t1, t2;
struct timezone tzone;
void start() {
gettimeofday(&t1, &tzone);
}
double stop() {
gettimeofday(&t2, &tzone);
double elapsedTime = 0;
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
return elapsedTime;
}
};*/
void check_cusparse_kernel(cusparseStatus_t cudaerr)
{
if (cudaerr != CUSPARSE_STATUS_SUCCESS)
printf("cuda kernel fail, err = %s\n", cudaerr);
}
template<typename T>
void swap(T *a , T *b)
{
T tmp = *a;
*a = *b;
*b = tmp;
}
// quick sort key-value pair (child function)
template<typename iT, typename vT>
int partition(iT *key, vT *val, int length, int pivot_index)
{
int i = 0 ;
int small_length = pivot_index;
iT pivot = key[pivot_index];
swap<iT>(&key[pivot_index], &key[pivot_index + (length - 1)]);
swap<vT>(&val[pivot_index], &val[pivot_index + (length - 1)]);
for(; i < length; i++)
{
if(key[pivot_index+i] < pivot)
{
swap<iT>(&key[pivot_index+i], &key[small_length]);
swap<vT>(&val[pivot_index+i],&val[small_length]);
small_length++;
}
}
swap<iT>(&key[pivot_index + length - 1], &key[small_length]);
swap<vT>(&val[pivot_index + length - 1],&val[small_length]);
return small_length;
}
// quick sort key-value pair (main function)
template<typename iT, typename vT>
void quick_sort_key_val_pair(iT *key, vT *val, int length)
{
if(length == 0 || length == 1)
return;
int small_length = partition<iT, vT>(key, val, length, 0) ;
quick_sort_key_val_pair<iT, vT>(key, val, small_length);
quick_sort_key_val_pair<iT, vT>(&key[small_length + 1], &val[small_length + 1], length - small_length - 1);
}
/*
template<typename iT>
void move_block(iT* first,
iT* last,
iT* result)
{
//memcpy(result, first, sizeof(iT) * (last - first));
while (first != last)
{
*result = *first;
++result;
++first;
}
}
template<typename iT, typename vT>
void serial_merge(iT* key_left_start,
iT* key_left_end,
iT* key_right_start,
iT* key_right_end,
iT* key_output,
vT* val_left_start,
vT* val_left_end,
vT* val_right_start,
vT* val_right_end,
vT* val_output)
{
while(key_left_start != key_left_end && key_right_start != key_right_end)
{
bool which = *key_right_start < *key_left_start;
//*key_output++ = std::move(which ? *key_right_start++ : *key_left_start++);
*key_output++ = which ? *key_right_start++ : *key_left_start++;
*val_output++ = which ? *val_right_start++ : *val_left_start++;
}
//std::move( key_left_start, key_left_end, key_output );
move_block<iT>(key_left_start, key_left_end, key_output);
move_block<vT>(val_left_start, val_left_end, val_output);
//std::move( key_right_start, key_right_end, key_output );
move_block<iT>(key_right_start, key_right_end, key_output);
move_block<vT>(val_right_start, val_right_end, val_output);
}
// merge sequences [key_left_start,key_left_end) and [key_right_start,key_right_end)
// to output [key_output, key_output+(key_left_end-key_left_start)+(key_right_end-key_right_start))
template<typename iT, typename vT>
void parallel_merge(iT* key_left_start,
iT* key_left_end,
iT* key_right_start,
iT* key_right_end,
iT* key_output,
vT* val_left_start,
vT* val_left_end,
vT* val_right_start,
vT* val_right_end,
vT* val_output)
{
const size_t MERGE_CUT_OFF = 2000;
if( key_left_end - key_left_start + key_right_end - key_right_start <= MERGE_CUT_OFF)
{
serial_merge<iT, vT>(key_left_start, key_left_end, key_right_start, key_right_end, key_output,
val_left_start, val_left_end, val_right_start, val_right_end, val_output);
}
else
{
iT *key_left_middle, *key_right_middle;
vT *val_left_middle, *val_right_middle;
if(key_left_end - key_left_start < key_right_end - key_right_start)
{
key_right_middle = key_right_start + (key_right_end - key_right_start) / 2;
val_right_middle = val_right_start + (val_right_end - val_right_start) / 2;
key_left_middle = std::upper_bound(key_left_start, key_left_end, *key_right_middle);
val_left_middle = val_left_start + (key_left_middle - key_left_start);
}
else
{
key_left_middle = key_left_start + (key_left_end - key_left_start) / 2;
val_left_middle = val_left_start + (val_left_end - val_left_start) / 2;
key_right_middle = std::lower_bound(key_right_start, key_right_end, *key_left_middle);
val_right_middle = val_right_start + (key_right_middle - key_right_start);
}
iT* key_output_middle = key_output + (key_left_middle - key_left_start) + (key_right_middle - key_right_start);
iT* val_output_middle = val_output + (val_left_middle - val_left_start) + (val_right_middle - val_right_start);
#pragma omp task
parallel_merge<iT, vT>(key_left_start, key_left_middle, key_right_start, key_right_middle, key_output,
val_left_start, val_left_middle, val_right_start, val_right_middle, val_output);
parallel_merge<iT, vT>(key_left_middle, key_left_end, key_right_middle, key_right_end, key_output_middle,
val_left_middle, val_left_end, val_right_middle, val_right_end, val_output_middle);
#pragma omp taskwait
}
}
// sorts [key_start,key_end).
// key_temp[0:key_end-key_start) is temporary buffer supplied by caller.
// result is in [key_start,key_end) if inplace==true,
// otherwise in key_temp[0:key_end-key_start).
template<typename iT, typename vT>
void parallel_merge_sort(iT* key_start,
iT* key_end,
iT* key_temp,
vT* val_start,
vT* val_end,
vT* val_temp,
bool inplace)
{
const size_t SORT_CUT_OFF = 500;
if(key_end - key_start <= SORT_CUT_OFF)
{
//std::stable_sort(key_start, key_end);
int list_length = key_end - key_start;
quick_sort_key_val_pair(key_start, val_start, list_length);
if(!inplace)
{
//std::move( key_start, key_end, key_temp );
move_block<iT>(key_start, key_end, key_temp);
move_block<vT>(val_start, val_end, val_temp);
}
}
else
{
iT* key_middle = key_start + (key_end - key_start) / 2;
vT* val_middle = val_start + (val_end - val_start) / 2;
iT* key_temp_middel = key_temp + (key_middle - key_start);
vT* val_temp_middel = val_temp + (val_middle - val_start);
iT* key_temp_end = key_temp + (key_end - key_start);
vT* val_temp_end = val_temp + (val_end - val_start);
#pragma omp task
parallel_merge_sort<iT, vT>(key_start, key_middle, key_temp,
val_start, val_middle, val_temp,
!inplace);
parallel_merge_sort<iT, vT>(key_middle, key_end, key_temp_middel,
val_middle, val_end, val_temp_middel,
!inplace);
#pragma omp taskwait
if(inplace)
parallel_merge<iT, vT>(key_temp, key_temp_middel, key_temp_middel, key_temp_end, key_start,
val_temp, val_temp_middel, val_temp_middel, val_temp_end, val_start);
else
parallel_merge<iT, vT>(key_start, key_middle, key_middle, key_end, key_temp,
val_start, val_middle, val_middle, val_end, val_temp);
}
}
// OpenMP tasks do not run in parallel unless launched inside a thread team.
// This outer wrapper shows how to create the thread team and run the top-level call.
template<typename iT, typename vT>
void do_parallel_merge_sort(iT* key_start,
iT* key_end,
iT* key_temp,
vT* val_start,
vT* val_end,
vT* val_temp,
bool inplace)
{
// Create a thread team.
#pragma omp parallel
// Make only one thread do the top-level call.
// Other threads in team pick up spawned tasks.
#pragma omp single
{
parallel_merge_sort<iT, vT>(key_start, key_end, key_temp,
val_start, val_end, val_temp,
inplace);
}
}
// merge sort key-value pair (main function)
template<typename iT, typename vT>
void omp_merge_sort_key_val_pair(iT *key, vT *val, int length)
{
//quick_sort_key_val_pair<iT, vT>(key, val, length);
if(length == 0 || length == 1)
return;
// allocate temp space for out-of-place merge sort
iT *key_temp = (iT *)malloc(length * sizeof(iT));
vT *val_temp = (vT *)malloc(length * sizeof(vT));
bool inplace = true;
do_parallel_merge_sort<iT, vT>(&key[0], &key[length], key_temp,
&val[0], &val[length], val_temp,
inplace);
// free temp space
free(key_temp);
free(val_temp);
}*/
// in-place exclusive scan
template<typename T>
void exclusive_scan(T *input, int length)
{
if(length == 0 || length == 1)
return;
T old_val, new_val;
old_val = input[0];
input[0] = 0;
for (int i = 1; i < length; i++)
{
new_val = input[i];
input[i] = old_val + input[i-1];
old_val = new_val;
}
}
// segmented sum
template<typename vT, typename bT>
void segmented_sum(vT *input, bT *bit_flag, int length)
{
if(length == 0 || length == 1)
return;
for (int i = 0; i < length; i++)
{
if (bit_flag[i])
{
int j = i + 1;
while (!bit_flag[j] && j < length)
{
input[i] += input[j];
j++;
}
}
}
}
// reduce sum
template<typename T>
T reduce_sum(T *input, int length)
{
if(length == 0)
return 0;
T sum = 0;
for (int i = 0; i < length; i++)
{
sum += input[i];
}
return sum;
}
#endif
|
4.norace1.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 4
int main() {
int A[N][N][N][N][N][N][N][N][N];
#pragma omp parallel for
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
for (int k = 1; k < N; k++)
for (int l = 1; l < N; l++)
for (int m = 1; m < N; m++)
for (int n = 1; n < N; n++)
for (int o = 1; o < N; o++)
for (int p = 1; p < N; p++)
for (int q = 1; q < N; q++)
A[i][j][k][l][m][n][o][p][q] = A[i][j][k][l][m][n][o][p][q];
}
// CHECK: Region is Data Race Free.
// END
|
SpaceFrame v4.3.h | #include <Windows.h>
#include <ctype.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
using namespace std;
class SpaceFrame
{
private:
double EPS;
double MAXTS;
double MAXLV;
int TNN; // total number of nodes
int NFIN; // number of fixed nodes
int NFRN; // number of free nodes
int NOR; // number of rods
int NOL; // number of loads
int NOS; // number of sections
struct Node // parameters of nodes
{
double XCN; // X coordinate of nodes
double YCN; // Y coordinate of nodes
double ZCN; // Z coordinate of nodes
};
Node *nodes; // parameters of nodes
struct Rod // parameters of nodes
{
int ENR; // the end node number of rods
int BNR; // the beginning node number of rods
double ELASTIC; // elastic modulus
double SHEAR; // shear modulus
double AREA; // area
double IMY; // inertia moment of Y axis
double IMZ; // inertia moment of Z axis
double THETA; // theta the deflection angle of main inertia axis
double LCS[4]; // the length, sine and cosine of rods
double RFE[6]; // the reaction force of the end node
};
Rod *rods; // parameters of nodes
struct Load // parameters of loads
{
int NRL; // the number of rods with load
int PLI; // the plane of the load's in
int KOL; // the kind of load
double VOL; // the value of load
double DLB; // the distance between load and the beginning node
};
Load *loads; // parameters of loads
struct Section // parameters of sections
{
int NRS; // the number of rod with section
double DSB; // the distance between section and the beginning node
double IFS[6]; // the internal force in the section
};
Section *sections; // parameters of sections
double *TotalStiffness; // total stiffness
double *LoadVector; // load vector
double *Displacement; // displacement of nodes
int *IV; // the location of diagonal element
int NSI; // upper limit
int MAXIBDW; // half bandwidth
bool ProgressBar; // open progress bar
bool Parallel; // open parallel
// calculate the length sine and cosine of rods
bool sfLCosSin()
{
for (int k = 0; k < NOR; k++)
{
int i = rods[k].BNR - 1, j = rods[k].ENR - 1; // index of beginning and end nodes of rods
rods[k].LCS[1] = nodes[j].XCN - nodes[i].XCN;
rods[k].LCS[2] = nodes[j].YCN - nodes[i].YCN;
rods[k].LCS[3] = nodes[j].ZCN - nodes[i].ZCN;
rods[k].LCS[0] = sqrt(rods[k].LCS[1] * rods[k].LCS[1] + rods[k].LCS[2] * rods[k].LCS[2] + rods[k].LCS[3] * rods[k].LCS[3]);
if (rods[k].LCS[0] < EPS) // if the length of rod is too small, then return error
{
sfPrintError(8);
return 1;
}
rods[k].LCS[1] = rods[k].LCS[1] / rods[k].LCS[0];
rods[k].LCS[2] = rods[k].LCS[2] / rods[k].LCS[0];
rods[k].LCS[3] = rods[k].LCS[3] / rods[k].LCS[0];
}
return 0;
}
// allocate total stiffness matrix, load vector and displacement vector
bool sfAllocate()
{
int it = 0, mm = 0, dof = 6 * NFRN;
int *peribdw = new int[TNN](); // bandwidth per line in total stiffness matrix
IV = new int[dof]();
for (int i = 0; i < NOR; i++) // for each rod
{
if (rods[i].BNR > NFIN)
{
mm = rods[i].ENR - rods[i].BNR; // bandwidth is end number minus begin number
if (mm > peribdw[rods[i].ENR - 1])
peribdw[rods[i].ENR - 1] = mm; // find the maximum bandwith per line
}
}
for (int i = NFIN; i < TNN; i++) // for each line in total stiffness matrix
{
if (peribdw[i] > MAXIBDW) // find maxim
MAXIBDW = peribdw[i];
for (int j = 1; j <= 6; j++)
{
it = it + 1;
if (it == 1)
IV[it - 1] = 6 * peribdw[i] + j;
else
IV[it - 1] = IV[it - 2] + 6 * peribdw[i] + j;
}
}
MAXIBDW = 6 * MAXIBDW + 5;
NSI = IV[dof - 1];
delete[] peribdw;
TotalStiffness = new double[NSI](); // allocate memory for total stiffness matrix
LoadVector = new double[dof](); // allocate memory for load vector
Displacement = new double[dof](); // allocate memory for displacement vector
return 0;
}
// build total stiffness matrix
bool sfBuildTotalStiff() // ts is total stiffness matrix
{
double us[36] = {0}; // unit stiffness matrix
int p[2] = {0}; // p is a temperary vector for i0j0, dof is the degree of freedom of nods
for (int k = 0; k < NOR; k++)
{
p[0] = 6 * (rods[k].BNR - NFIN - 1); // match the displacement with nods
p[1] = 6 * (rods[k].ENR - NFIN - 1);
for (int i = 0; i < 2; i++)
{
if (p[i] >= 0) // determine free node
{
if (sfBuildUnitStiff(k, i + 1, us)) // build unit stiffness matrix
{
sfPrintError(7);
return 1;
}
for (int m = 0; m < 6; m++)
for (int n = 0; n <= m; n++)
TotalStiffness[IV[(p[i] + m)] + (p[i] + n) - (p[i] + m) - 1] += us[m * 6 + n]; // superpose
}
}
if (p[0] >= 0 && p[1] >= 0)
{
if (sfBuildUnitStiff(k, 3 + 1, us)) // build unit stiffness matrix
{
sfPrintError(7);
return 1;
}
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
TotalStiffness[IV[(p[1] + m)] + (p[0] + n) - (p[1] + m) - 1] += us[m * 6 + n]; // superpose
}
}
for (int i = 0; i < NSI; i++)
if (fabs(TotalStiffness[i]) > MAXTS)
MAXTS = TotalStiffness[i];
return 0;
}
// build unit stiffness matrix
bool sfBuildUnitStiff(int k, int flag, double *us) // k is the number of rods, flag is the index of matrix parts, us is the unit stiffness matrix
{
if (k < 0)
{
sfPrintError(16);
return 0;
}
if (flag < 1 || flag > 4)
{
sfPrintError(16);
return 0;
}
if (us == NULL)
{
sfPrintError(16);
return 0;
}
double rd[36] = {0}, t[36] = {0}, c[36] = {0}, tmp = 0; // rd is local stiffness matrix, t is transpose matrix, c is a temperary matrix
memset(us, 0, 36 * sizeof(double));
if (sfBuildLocalStiff(k, flag, rd)) // build local stiffness matrix
{
sfPrintError(9);
return 1;
}
if (sfBuildTrans(k, t)) // build transpose matrix
{
sfPrintError(10);
return 1;
}
for (int i = 0; i < 6; i++) // transpose matrix times local stiffness matrix, store the result in c
for (int m = 0; m < 6; m++)
{
tmp = t[i * 6 + m];
for (int j = 0; j < 6; j++)
c[i * 6 + j] += tmp * rd[m * 6 + j];
}
for (int i = 0; i < 6; i++) // c times the transposition of transpose matrix, store the result in unit stiff
for (int j = 0; j < 6; j++)
for (int m = 0; m < 6; m++)
us[i * 6 + j] += c[i * 6 + m] * t[j * 6 + m];
return 0;
}
// build local stiffness matrix
bool sfBuildLocalStiff(int k, int flag, double *rd) // k is the number of rods, flag is the number of matrix
{
if (k < 0)
{
sfPrintError(17);
return 0;
}
if (flag < 0 || flag > 4)
{
sfPrintError(17);
return 0;
}
if (rd == NULL)
{
sfPrintError(17);
return 0;
}
double a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0, l = rods[k].LCS[0];
a = rods[k].ELASTIC * rods[k].AREA / l; // EA/1
b = rods[k].SHEAR * (rods[k].IMY + rods[k].IMZ) / l; // GJ(p)/1
c = 4 * rods[k].ELASTIC * rods[k].IMY / l; // 4EJ(y)/1
d = c / 2 * 3 / l; // 6EJ(z)/l/l
e = 2 * d / l; // 12EJ(y)/l/l/l
f = 4 * rods[k].ELASTIC * rods[k].IMZ / l; // 4EJ(z)/l
g = f / 2 * 3 / l; // 6EJ(Z)/l/l
h = 2 * g / l; // 12EJ(z)/l/l/l
switch (flag)
{
case 1: // k11
rd[0 * 6 + 0] = a;
rd[1 * 6 + 1] = h;
rd[1 * 6 + 5] = rd[5 * 6 + 1] = g;
rd[2 * 6 + 2] = e;
rd[2 * 6 + 4] = rd[4 * 6 + 2] = -d;
rd[3 * 6 + 3] = b;
rd[4 * 6 + 4] = c;
rd[5 * 6 + 5] = f;
break;
case 2: // k22
rd[0 * 6 + 0] = a;
rd[1 * 6 + 1] = h;
rd[1 * 6 + 5] = rd[5 * 6 + 1] = -g;
rd[2 * 6 + 2] = e;
rd[2 * 6 + 4] = rd[4 * 6 + 2] = d;
rd[3 * 6 + 3] = b;
rd[4 * 6 + 4] = c;
rd[5 * 6 + 5] = f;
break;
case 3: // k12
rd[0 * 6 + 0] = -a;
rd[1 * 6 + 1] = -h;
rd[1 * 6 + 5] = g;
rd[5 * 6 + 1] = -g;
rd[2 * 6 + 2] = -e;
rd[2 * 6 + 4] = -d;
rd[4 * 6 + 2] = d;
rd[3 * 6 + 3] = -b;
rd[4 * 6 + 4] = c / 2;
rd[5 * 6 + 5] = f / 2;
break;
case 4: // k21
rd[0 * 6 + 0] = -a;
rd[1 * 6 + 1] = -h;
rd[1 * 6 + 5] = -g;
rd[5 * 6 + 1] = g;
rd[2 * 6 + 2] = -e;
rd[2 * 6 + 4] = d;
rd[4 * 6 + 2] = -d;
rd[3 * 6 + 3] = -b;
rd[4 * 6 + 4] = c / 2;
rd[5 * 6 + 5] = f / 2;
break;
default:
break;
}
return 0;
}
// build transpose matrix
bool sfBuildTrans(int k, double *t) // k is the number of rods, t is transpose matrix
{
if (k < 0)
{
sfPrintError(18);
return 0;
}
if (t == NULL)
{
sfPrintError(18);
return 0;
}
double coa = 0, cob = 0, coc = 0, sic = 0, sit = 0, cot = 0, m = 0, n = 0; // co means cosine, si means sine, m and n is temperary variable
memset(t, 0, 36 * sizeof(double));
coa = rods[k].LCS[1]; // cosine alpha
cob = rods[k].LCS[2]; // cosine beta
coc = rods[k].LCS[3]; // cosine gama
sit = sin(rods[k].THETA); // sine theta
cot = cos(rods[k].THETA); // cosine theta
if (fabs(coc - 1) < EPS) // vertical(z axis positive direction) rods' transpose matrix
{
t[2 * 6 + 0] = t[5 * 6 + 3] = 1;
t[0 * 6 + 1] = t[3 * 6 + 4] = t[1 * 6 + 2] = t[4 * 6 + 5] = sit;
t[1 * 6 + 1] = t[4 * 6 + 4] = cot;
t[0 * 6 + 2] = t[3 * 6 + 5] = -cot;
}
else if (fabs(coc + 1) < EPS) // vertical(z axis negative direction) rods' transpose matrix
{
t[2 * 6 + 0] = t[5 * 6 + 3] = -1;
t[0 * 6 + 1] = t[3 * 6 + 4] = sit;
t[1 * 6 + 2] = t[4 * 6 + 5] = -sit;
t[1 * 6 + 1] = t[4 * 6 + 4] = t[0 * 6 + 2] = t[3 * 6 + 5] = cot;
}
else
{
sic = sqrt(1 - coc * coc); // sine gama
m = coa * coc; // cosine alpha times cosine gama
n = cob * coc; // cosine beta times cosine gama
t[0 * 6 + 0] = t[3 * 6 + 3] = coa;
t[1 * 6 + 0] = t[4 * 6 + 3] = cob;
t[2 * 6 + 0] = t[5 * 6 + 3] = coc;
t[0 * 6 + 1] = t[3 * 6 + 4] = (cob * sit - m * cot) / sic;
t[1 * 6 + 1] = t[4 * 6 + 4] = -(n * cot + coa * sit) / sic;
t[2 * 6 + 1] = t[5 * 6 + 4] = cot * sic;
t[0 * 2 + 2] = t[3 * 6 + 5] = (m * sit + cob * cot) / sic;
t[1 * 6 + 2] = t[4 * 6 + 5] = (n * sit - coa * cot) / sic;
t[2 * 6 + 2] = t[5 * 6 + 5] = -sit * sic;
}
return 0;
}
// build load vector
bool sfBuildLoadVector(double *lv) // lv is the load vector
{
if (lv == 0)
{
sfPrintError(19);
return 0;
}
int rod = 0, p[2] = {0}; // rod is the number of rods, dof is the degree of freedom
double rf[12] = {0}, t[36] = {0}; // rf is the reaction force matrix, t is the transpose matrix, p is a temperary vector for i0j0
for (int i = 0; i < NOL; i++)
{
rod = loads[i].NRL - 1; // the number of rods with load
memset(rf, 0, 12 * sizeof(double)); // zero clearing
if (sfReactionForce(i, &rf[0 * 6], &rf[1 * 6])) // calculate reaction force
{
sfPrintError(11);
return 1;
}
for (int j = 0; j < 6; j++) // add reaction force to RFE
rods[rod].RFE[j] += rf[1 * 6 + j];
if (sfBuildTrans(rod, t)) // build transpose matrix
{
sfPrintError(10);
return 1;
}
p[0] = 6 * (rods[rod].BNR - NFIN - 1); // match the displacement with nods
p[1] = 6 * (rods[rod].ENR - NFIN - 1);
for (int j = 0; j < 2; j++) // add reaction force to load vector
{
if (p[j] >= 0) // determine free node
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
lv[p[j] + m] -= t[m * 6 + n] * rf[j * 6 + n];
}
}
for (int i = 0; i < 6 * NFRN; i++)
if (fabs(lv[i]) > MAXLV)
MAXLV = lv[i];
return 0;
}
// calculate reaction force
bool sfReactionForce(int i, double *rfb, double *rfe) // i is the number of load, rfb and rfe is the reaction force at begining and end of rods
{
if (i < 0)
{
sfPrintError(20);
return 0;
}
if (rfb == NULL)
{
sfPrintError(20);
return 0;
}
if (rfe == NULL)
{
sfPrintError(20);
return 0;
}
double ra = 0, rb = 0, a = 0, b = 0, q = loads[i].VOL, xq = loads[i].DLB; // ra, rb, a and b are middle variable
int rod = loads[i].NRL - 1, pm = loads[i].PLI, t = 0; // rod is the number of rods
if (pm == 0) // load is in XY plane
t = -1; // The bending moment in the support-reaction equation is positive clockwise, convert it to positive to the coordinate axis
else if (pm == 1) // load is in XZ plane
t = 1; // The bending moment in the support-reaction equation is positive clockwise, convert it to positive to the coordinate axis
ra = loads[i].DLB / rods[rod].LCS[0]; // x(q) / L
rb = 1 - ra; // 1 - x(q) / L
switch (loads[i].KOL)
{
case 1: // vertical concentrating load
a = rb * rb;
rfb[pm + 1] = -q * rb * (1 + ra - 2 * ra * ra);
rfe[pm + 1] = -q - rfb[pm + 1];
rfb[5 - pm] = t * q * rb * ra * (rods[rod].LCS[0] - xq);
rfe[5 - pm] = -t * q * ra * rb * xq;
break;
case 2: // vertical uniform load
a = q * xq;
b = a * xq / 12;
rfb[pm + 1] = -a * (1 + 0.5 * ra * ra * ra - ra * ra);
rfe[pm + 1] = -a - rfb[pm + 1];
rfb[5 - pm] = t * b * (6 - 8 * ra + 3 * ra * ra);
rfe[5 - pm] = -t * b * (4 * ra - 3 * ra * ra);
break;
case 3: // axial concentrating force when PLI == 0, torque when PLI ==1
rfb[3 * pm] = -q * rb;
rfe[3 * pm] = -q * ra;
break;
case 4: // axial uniform load
a = q * xq;
rfe[3 * pm] = -a * ra / 2;
rfb[3 * pm] = -a - rfe[3 * pm];
break;
case 5: // vertical triangle distributed load
a = q * xq / 2;
b = -0.4 * ra * ra;
rfb[pm + 1] = -2 * a * (0.5 - 0.75 * ra * ra + 0.4 * ra * ra * ra);
rfe[pm + 1] = -a - rfb[pm + 1];
rfb[5 - pm] = t * a * (2 / 3 + b - ra);
rfe[5 - pm] = -t * a * (0.5 * ra + b);
break;
case 6: // concentrating bending moment
rfb[2 - pm] = t * 6 * q * rb * ra / rods[rod].LCS[0];
rfe[2 - pm] = -rfb[2 - pm];
rfb[pm + 4] = t * q * rb * (-1 + 3 * ra);
rfe[pm + 4] = t * q * ra * (2 - 3 * ra);
break;
case 7: // unifrom temperature rise
rfb[0] = q * xq * rods[rod].ELASTIC * rods[rod].AREA;
rfe[0] = -rfb[0];
break;
case 8: // different temperature rise
if (pm == 0)
a = rods[rod].IMZ;
else if (pm == 1)
a = rods[rod].IMY;
rfb[5 - pm] = t * q * 2 * rods[rod].ELASTIC * a * xq;
rfe[5 - pm] = -rfb[5 - pm];
break;
default:
break;
}
return 0;
}
// solve equation of matrix by conjugate gradient
bool sfConjugateGradient(double *A, double *b, double *x, int N)
{
if (A == NULL)
{
sfPrintError(12);
return 1;
}
else if (b == NULL)
{
sfPrintError(12);
return 1;
}
else if (x == NULL)
{
sfPrintError(12);
return 1;
}
else if (N == 0)
{
sfPrintError(12);
return 1;
}
double *r = NULL, *p = NULL, *z = NULL;
double gamma = 0, gamma_new = 0, gamma_new_sqrt = 0, alpha = 0, beta = 0;
int percent = 0, percent_new = 0;
if (ProgressBar)
cout << "\rSolving equation [ 0%% ][ ]";
r = (double *)malloc(N * sizeof(double));
memset(r, 0, sizeof(double));
p = (double *)malloc(N * sizeof(double));
memset(p, 0, sizeof(double));
z = (double *)malloc(N * sizeof(double));
memset(z, 0, sizeof(double));
for (int i = 0; i < NSI; i++)
A[i] = A[i] / MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] / MAXLV;
// x = [0 ... 0]
// r = b - A * x
// p = r
// gamma = r' * r
gamma = 0.0;
for (int i = 0; i < N; ++i)
{
x[i] = 0.0;
r[i] = b[i];
p[i] = r[i];
gamma += r[i] * r[i];
}
for (int n = 0; 1; ++n)
{
// z = A * p
for (int i = 0; i < N; i++)
{
z[i] = 0.0;
for (int j = 0; j < N; j++)
{
if (i == j)
{
z[i] += A[IV[i] - 1] * p[j];
}
else if (j > i)
{
if ((IV[j] - j + i) > IV[j - 1])
z[i] += A[IV[j] - j + i - 1] * p[j];
else
z[i] += 0;
}
else if (i > j)
{
if ((IV[i] - i + j) > IV[i - 1])
z[i] += A[IV[i] - i + j - 1] * p[j];
else
z[i] += 0;
}
}
}
// alpha = gamma / (p' * z)
alpha = 0.0;
for (int i = 0; i < N; ++i)
alpha += p[i] * z[i];
alpha = gamma / alpha;
// x = x + alpha * p
// r = r - alpha * z
// gamma_new = r' * r
gamma_new = 0.0;
for (int i = 0; i < N; ++i)
{
x[i] += alpha * p[i];
r[i] -= alpha * z[i];
gamma_new += r[i] * r[i];
}
gamma_new_sqrt = sqrt(gamma_new);
if (gamma_new_sqrt < EPS)
break;
if (ProgressBar)
{
percent_new = (int)((1 - log10(gamma_new_sqrt / EPS) / 16) * 100);
if (percent_new > percent)
{
percent = percent_new;
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
cout << "[ " << percent << "%% ]";
cout << "[";
for (int i = 0; i < 49; i++)
if (i < percent / 2)
cout << "=";
else
cout << " ";
cout << "]";
}
else
{
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
}
}
beta = gamma_new / gamma;
// p = r + (gamma_new / gamma) * p;
for (int i = 0; i < N; ++i)
p[i] = r[i] + beta * p[i];
// gamma = gamma_new
gamma = gamma_new;
}
for (int i = 0; i < NSI; i++)
A[i] = A[i] * MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] * MAXLV;
for (int i = 0; i < N; i++)
x[i] = x[i] * MAXLV / MAXTS;
if (ProgressBar)
cout << "\rSolving equation done [ 100%% ][=================================================]\n";
free(r);
free(p);
free(z);
return 0;
}
// solve equation of matrix by conjugate gradient parallel
bool sfConjugateGradientPar(double *A, double *b, double *x, int N)
{
if (A == NULL)
{
sfPrintError(12);
return 1;
}
else if (b == NULL)
{
sfPrintError(12);
return 1;
}
else if (x == NULL)
{
sfPrintError(12);
return 1;
}
else if (N == 0)
{
sfPrintError(12);
return 1;
}
double *r = NULL, *p = NULL, *z = NULL;
double gamma = 0, gamma_new = 0, gamma_new_sqrt = 0, alpha = 0, beta = 0;
int percent = 0, percent_new = 0;
if (ProgressBar)
{
cout << "\rSolving equation [ 0%% ][ ]";
}
r = (double *)malloc(N * sizeof(double));
memset(r, 0, sizeof(double));
p = (double *)malloc(N * sizeof(double));
memset(p, 0, sizeof(double));
z = (double *)malloc(N * sizeof(double));
memset(z, 0, sizeof(double));
for (int i = 0; i < NSI; i++)
A[i] = A[i] / MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] / MAXLV;
// x = [0 ... 0]
// r = b - A * x
// p = r
// gamma = r' * r
gamma = 0.0;
#pragma omp parallel for reduction(+ \
: gamma)
for (int i = 0; i < N; ++i)
{
x[i] = 0.0;
r[i] = b[i];
p[i] = r[i];
gamma += r[i] * r[i];
}
for (int n = 0; true; ++n)
{
// z = A * p
#pragma omp parallel for
for (int i = 0; i < N; i++)
{
z[i] = 0.0;
for (int j = 0; j < N; j++)
{
if (i == j)
{
z[i] += A[IV[i] - 1] * p[j];
}
else if (j > i)
{
if ((IV[j] - j + i) > IV[j - 1])
z[i] += A[IV[j] - j + i - 1] * p[j];
else
z[i] += 0;
}
else if (i > j)
{
if ((IV[i] - i + j) > IV[i - 1])
z[i] += A[IV[i] - i + j - 1] * p[j];
else
z[i] += 0;
}
}
}
// alpha = gamma / (p' * z)
alpha = 0.0;
#pragma omp parallel for reduction(+ \
: alpha)
for (int i = 0; i < N; ++i)
alpha += p[i] * z[i];
alpha = gamma / alpha;
// x = x + alpha * p
// r = r - alpha * z
// gamma_new = r' * r
gamma_new = 0.0;
#pragma omp parallel for reduction(+ \
: gamma_new)
for (int i = 0; i < N; ++i)
{
x[i] += alpha * p[i];
r[i] -= alpha * z[i];
gamma_new += r[i] * r[i];
}
gamma_new_sqrt = sqrt(gamma_new);
if (gamma_new_sqrt < EPS)
break;
if (ProgressBar)
{
percent_new = (int)((1 - log10(gamma_new_sqrt * 1e15) / 16) * 100);
if (percent_new > percent)
{
percent = percent_new;
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
cout << "[ " << percent << "%% ]";
cout << "[";
for (int i = 0; i < 49; i++)
if (i < percent / 2)
cout << "=";
else
cout << " ";
cout << "]";
}
else
{
cout << "\rSolving equation ";
for (int i = 0; i <= 4; i++)
if (i <= n % 4)
cout << ".";
else
cout << " ";
}
}
beta = gamma_new / gamma;
// p = r + (gamma_new / gamma) * p;
#pragma omp parallel for
for (int i = 0; i < N; ++i)
p[i] = r[i] + beta * p[i];
// gamma = gamma_new
gamma = gamma_new;
}
for (int i = 0; i < NSI; i++)
A[i] = A[i] * MAXTS;
for (int i = 0; i < N; i++)
b[i] = b[i] * MAXLV;
for (int i = 0; i < N; i++)
x[i] = x[i] * MAXLV / MAXTS;
if (ProgressBar)
{
cout << "\rSolving equation done [ 100%% ][=================================================]\n";
}
free(r);
free(p);
free(z);
return 0;
}
// calculate internal force of rods
bool sfInternalForce(int mm, int k, double xp) // m is the number of sections, k is the actual number of rods, xp is the distance between the section and the begining of rods
{
if (mm < 0)
{
sfPrintError(21);
return 0;
}
if (k < 0)
{
sfPrintError(21);
return 0;
}
double tf[6] = {0}; // tf is temperary variable
sections[mm].IFS[0] = +rods[k - 1].RFE[0]; // calculate internal force cause by reaction force at the end of rods
sections[mm].IFS[1] = -rods[k - 1].RFE[1];
sections[mm].IFS[2] = -rods[k - 1].RFE[2];
sections[mm].IFS[3] = +rods[k - 1].RFE[3];
sections[mm].IFS[4] = -rods[k - 1].RFE[4] + rods[k - 1].RFE[2] * (rods[k - 1].LCS[0] - xp);
sections[mm].IFS[5] = +rods[k - 1].RFE[5] + rods[k - 1].RFE[1] * (rods[k - 1].LCS[0] - xp);
for (int i = 0; i < NOL; i++) // for every rods
if (loads[i].NRL == k) // if load is on rod k
{
memset(tf, 0, 6 * sizeof(double)); // zero clear tf
if (sfCtlInternalForce(i, xp, tf)) // calculate internal force of cantilever beam
{
sfPrintError(13);
return 1;
}
for (int j = 0; j < 6; j++) // add internal force of cantilever into IFR
sections[mm].IFS[j] += tf[j];
}
if (sfDisplacementForce(k, tf)) // calculate end force
{
sfPrintError(14);
return 1;
}
sections[mm].IFS[0] -= tf[0]; // calculate section force cause by end force
sections[mm].IFS[1] += tf[1];
sections[mm].IFS[2] += tf[2];
sections[mm].IFS[3] -= tf[3];
sections[mm].IFS[4] += tf[4] + tf[2] * xp;
sections[mm].IFS[5] -= tf[5] - tf[1] * xp;
return 0;
}
// calculate internal force of cantilever beam
bool sfCtlInternalForce(int i, double xp, double *tf) // i is the number of load, xp is the distance between the section and the begining of rod, tf is internal force
{
if (i < 0)
{
sfPrintError(22);
return 0;
}
if (tf == NULL)
{
sfPrintError(22);
return 0;
}
double xq = loads[i].DLB, t = xq - xp, r = xp / xq, q = loads[i].VOL; // t and r are temperary variables
int e = loads[i].PLI;
switch (loads[i].KOL) // calculate section force according to kind of loads
{
case 1:
if (xp < xq)
{
tf[e + 1] = -q;
tf[5 - e] = q * t;
}
break;
case 2:
if (xp < xq)
{
tf[e + 1] = -q * t;
tf[5 - e] = 0.5 * q * t * t;
}
break;
case 3:
if (xp < xq)
tf[3 * e] = q;
break;
case 4:
if (xp < xq)
tf[3 * e] = q * t;
break;
case 5:
if (xp < xq)
{
tf[e + 1] = -q * (1 + r) * t / 2;
tf[5 - e] = q * t * t * (2 + r) / 6;
}
break;
case 6:
if (xp < xq)
tf[e + 4] = (2 * e - 1) * q;
break;
case 7: // temperature change don't generate internal force on cantilever beam
break;
case 8:
break;
default:
break;
}
return 0;
}
// calculate internal force of displacement
bool sfDisplacementForce(int k, double *tref) // k is the actual number of rods, tref is the end force of rods
{
if (k < 1)
{
sfPrintError(23);
return 0;
}
if (tref == NULL)
{
sfPrintError(23);
return 0;
}
int p[2] = {0}; // p is a temperary vector for i0j0
double rd[36] = {0}, rdb[36] = {0}, t[36] = {0}; // rd
memset(tref, 0, 6 * sizeof(double));
if (sfBuildTrans(k - 1, t)) // calculate transpose matrix
{
sfPrintError(10);
return 1;
}
p[0] = 6 * (rods[k - 1].BNR - NFIN - 1); // match the displacement with nods
p[1] = 6 * (rods[k - 1].ENR - NFIN - 1);
for (int i = 0; i < 2; i++)
{
if (p[i] >= 0) // determine free node
{
if (sfBuildLocalStiff(k - 1, 2 * i + 1, rd)) // build unit stiffness matrix
{
sfPrintError(9);
return 1;
}
memset(rdb, 0, 36 * sizeof(double)); // zero clean rdb
for (int j = 0; j < 6; j++) // rd times transposition of transpose matrix
for (int m = 0; m < 6; m++)
for (int n = 0; n < 6; n++)
rdb[j * 6 + m] += rd[j * 6 + n] * t[m * 6 + n];
for (int j = 0; j < 6; j++) // rdb times DON
for (int m = 0; m < 6; m++)
tref[j] += rdb[j * 6 + m] * Displacement[p[i] + m];
}
else // fixed node
for (int j = 0; j < 3; j++)
tref[j] += 0;
}
return 0;
}
// print"----------------------------------------"
bool sfPrintLine()
{
cout << "-------------------------------------------------------------------------------------------------------------------------------\n";
return 0;
}
// print"****************************************"
bool sfPrintLine2()
{
cout << "**************************************************************************\n";
return 0;
}
// print error
bool sfPrintError(int error)
{
cout << "ERROR:\t";
switch (error)
{
case 1:
cout << "Data input failed!\n";
break;
case 2:
cout << "Building total stiffness matrix failed!\n";
break;
case 3:
cout << "Building load vector failed!\n";
break;
case 4:
cout << "Solving equation failed!\n";
break;
case 5:
cout << "Calculating internal force failed!\n";
break;
case 6:
cout << "Calculating length, cosine and sine failed!\n";
break;
case 7:
cout << "Building unit stiffness matrix failed!\n";
break;
case 8:
cout << "The length of a rod is too small!\n";
break;
case 9:
cout << "Building local stiffness matrix filed!\n";
break;
case 10:
cout << "Building transpose matrix failed!\n";
break;
case 11:
cout << "Calculating reaction force failed!\n";
break;
case 12:
cout << "There is something wrong in the equation!\n";
break;
case 13:
cout << "calculating internal force of cantilever beam failed!\n";
break;
case 14:
cout << "Calculating end force failed!\n";
break;
case 15:
cout << "Allocating total stiffness matrix failed!\n";
break;
case 16:
cout << "There is something wrong in building unit stiffness matrix!\n";
break;
case 17:
cout << "There is something wrong in building local stiffness matrix!\n";
break;
case 18:
cout << "There is something wrong in building transpose matrix failed!\n";
break;
case 19:
cout << "There is something wrong in building load vector!\n";
break;
case 20:
cout << "There is something wrong in calculating reaction force!\n";
break;
case 21:
cout << "There is something wrong in calculating internal force!\n";
break;
case 22:
cout << "There is something wrong in calculating internal force of cantilever!\n";
break;
case 23:
cout << "There is something wrong in calculating internal force of displacement!\n";
break;
case 24:
cout << "!\n";
break;
case 25:
cout << "!\n";
break;
default:
break;
}
cout << "There is at least one error in your file, please check it and try it one more time.\n";
return 0;
}
public:
SpaceFrame();
SpaceFrame(SpaceFrame &);
~SpaceFrame();
// read data from .csv
bool sfInput();
// calculate
bool sfCalculate(bool, bool, double);
// output data
bool sfOutput();
// create circular structure
bool sfCircularStructure(int, int, int);
};
SpaceFrame::SpaceFrame()
{
EPS = 1e-15;
MAXTS = 0;
MAXLV = 0;
TNN = 0; // total number of nodes
NFIN = 0; // number of fixed nodes
NFRN = 0; // number of free nodes
NOR = 0; // number of rods
NOL = 0; // number of loads
NOS = 0; // number of sections
nodes = NULL; // parameters of nodes
rods = NULL; // parameters of rods
loads = NULL; // parameters of loads
sections = NULL; // parameters of sections
TotalStiffness = NULL; // total stiffness
LoadVector = NULL; // load vector
Displacement = NULL; // the displacement of nodes
IV = NULL; // the location of diagonal element
NSI = 0; // upper limit
MAXIBDW = 0; // half bandwidth
ProgressBar = 1; // open progress bar
Parallel = 1; // open parallel
}
SpaceFrame::SpaceFrame(SpaceFrame &Frame)
{
EPS = Frame.EPS;
MAXTS = Frame.MAXTS;
MAXLV = Frame.MAXLV;
TNN = Frame.TNN;
NFIN = Frame.NFIN;
NFRN = Frame.NFRN;
NOR = Frame.NOR;
NOL = Frame.NOL;
NOS = Frame.NOS;
nodes = new Node[TNN]();
if (Frame.nodes != NULL)
memcpy(nodes, Frame.nodes, TNN * sizeof(Node));
rods = new Rod[NOR]();
if (Frame.rods != NULL)
memcpy(rods, Frame.rods, NOR * sizeof(Rod));
loads = new Load[NOL]();
if (Frame.loads != NULL)
memcpy(loads, Frame.loads, NOL * sizeof(Load));
sections = new Section[NOS]();
if (Frame.sections != NULL)
memcpy(sections, Frame.sections, NOS * sizeof(Section));
int dof = 6 * NFRN;
IV = new int[dof]();
if (Frame.IV != NULL)
memcpy(IV, Frame.IV, dof * sizeof(int));
NSI = Frame.NSI;
MAXIBDW = Frame.MAXIBDW;
TotalStiffness = new double[NSI]();
if (Frame.TotalStiffness != NULL)
memcpy(TotalStiffness, Frame.TotalStiffness, NSI * sizeof(double));
LoadVector = new double[dof]();
if (Frame.LoadVector != NULL)
memcpy(LoadVector, Frame.LoadVector, dof * sizeof(double));
Displacement = new double[dof]();
if (Frame.Displacement != NULL)
memcpy(Displacement, Frame.Displacement, dof * sizeof(double));
ProgressBar = Frame.ProgressBar;
Parallel = Frame.Parallel;
}
SpaceFrame::~SpaceFrame()
{
delete[] nodes;
nodes = NULL;
delete[] rods;
rods = NULL;
delete[] loads;
loads = NULL;
delete[] sections;
sections = NULL;
delete[] TotalStiffness;
TotalStiffness = NULL;
delete[] LoadVector;
LoadVector = NULL;
delete[] Displacement;
Displacement = NULL;
delete[] IV;
IV = NULL;
}
bool SpaceFrame::sfInput()
{
FILE *fp = NULL; // Define the file point
char *line = 0, *data = 0; // Define the line string and separated string
char temporSpace[1000000]; // Apply for temporary storage space
int rowIndex = 0, columnIndex = 0; // Reset the number of rows to zero, reset the number of columns to zero
const char DIVIDE[] = ","; // Set the separater as a ','
if ((fp = fopen("source&result/sf_test.csv", "r")) == NULL) // Start the process when the file opens successfully
{
cout << "There is no such file!";
return 0;
}
fseek(fp, 0L, SEEK_SET); // Locate file point to the first line
while ((line = fgets(temporSpace, sizeof(temporSpace), fp)) != NULL) // The loop continues when the end of the file is not read
{
data = strtok(line, DIVIDE); // Split strings with a ',' as a separator
while (data != NULL) // Read the data of each row
{
if (strcmp(data, "END") == 0) // When the keyword 'END' is read, the reading process will be shut down
{
fclose(fp); // Close the file
fp = NULL; // Reset the file point
cout << "Inputing data succeed!\n";
return 0;
}
if (columnIndex++ == 0) // Skip the saving of the first column
{
data = strtok(NULL, DIVIDE); // Reset data
continue;
}
switch (rowIndex) // Store variables of each column in different ways
{
case 0:
break;
case 1:
if (columnIndex == 2)
TNN = atoi(data);
break;
case 2:
if (columnIndex == 2)
NFIN = atoi(data);
NFRN = TNN - NFIN;
break;
case 3:
if (columnIndex == 2)
NOR = atoi(data);
break;
case 4:
if (columnIndex == 2)
NOL = atoi(data);
break;
case 5:
if (columnIndex == 2)
{
NOS = atoi(data);
if (nodes != NULL)
this->~SpaceFrame();
nodes = new Node[TNN]();
rods = new Rod[NOR]();
loads = new Load[NOL]();
sections = new Section[NOS]();
}
break;
case 6:
if (columnIndex - 2 < TNN)
nodes[columnIndex - 2].XCN = atof(data);
break;
case 7:
if (columnIndex - 2 < TNN)
nodes[columnIndex - 2].YCN = atof(data);
break;
case 8:
if (columnIndex - 2 < TNN)
nodes[columnIndex - 2].ZCN = atof(data);
break;
case 9:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].BNR = atoi(data);
break;
case 10:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].ENR = atoi(data);
break;
case 11:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].ELASTIC = atof(data);
break;
case 12:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].SHEAR = atof(data);
break;
case 13:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].AREA = atof(data);
break;
case 14:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].IMY = atof(data);
break;
case 15:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].IMZ = atof(data);
break;
case 16:
if (columnIndex - 2 < NOR)
rods[columnIndex - 2].THETA = atof(data);
break;
case 17:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].NRL = atoi(data);
break;
case 18:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].PLI = atoi(data);
break;
case 19:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].KOL = atoi(data);
break;
case 20:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].VOL = atof(data);
break;
case 21:
if (columnIndex - 2 < NOL)
loads[columnIndex - 2].DLB = atof(data);
break;
case 22:
if (columnIndex - 2 < NOS)
sections[columnIndex - 2].NRS = atoi(data);
break;
case 23:
if (columnIndex - 2 < NOS)
sections[columnIndex - 2].DSB = atof(data);
break;
} // input finished
data = strtok(NULL, DIVIDE); // Reset data
}
rowIndex++; // RowIndex steps forward once
columnIndex = 0; // Reset columnIndex
}
fclose(fp); // Close the file
fp = NULL; // Reset the file point
return 0;
}
bool SpaceFrame::sfOutput()
{
if (true) // console
{
sfPrintLine();
cout << setw(80) << "Calculation Of Space Rigid Frame\n";
sfPrintLine();
cout << "| TNN = " << setw(9) << TNN << " | NFIN = " << setw(8) << NFIN << " | NFRN = " << setw(8) << NFRN << " | NOR = " << setw(9) << NOR << " | NOL = " << setw(9) << NOL << " | NOS = " << setw(9) << NOS << " | |\n";
sfPrintLine();
cout << "| Nodes | Coordinate-X | Coordinate-Y | Coordinate-Z | | |\n";
for (int i = 0; i < TNN; i++)
cout << "| " << setw(15) << i + 1 << " | " << setw(15) << nodes[i].XCN << " | " << setw(15) << nodes[i].YCN << " | " << setw(15) << nodes[i].ZCN << " | | | |\n";
sfPrintLine();
cout << "| Rods | Left - Right | Elastic Modulus | Shear modulus | Area | Inertia Y Axis | Inertia Z Axis |\n";
for (int i = 0; i < NOR; i++)
cout << "| " << setw(15) << i + 1 << " | " << setw(6) << rods[i].BNR << " - " << left << setw(6) << rods[i].ENR << " | " << right << setw(15) << rods[i].ELASTIC << " | " << setw(15) << rods[i].SHEAR << " | " << setw(15) << rods[i].AREA << " | " << setw(15) << rods[i].IMY << " | " << setw(15) << rods[i].IMZ << " |\n";
sfPrintLine();
cout << "| Sections | Rods | Distance | | | |\n";
for (int i = 0; i < NOS; i++)
cout << "| " << setw(15) << i + 1 << " | " << setw(15) << sections[i].NRS << " | " << setw(15) << sections[i].DSB << " | | | | |\n";
sfPrintLine();
cout << "| Nodes | Displacement-X | Displacement-Y | Displacement-Z | Diversion-X | Diversion-Y | Diversion-Z |\n";
for (int i = NFIN; i < TNN; i++)
cout << "| " << setw(15) << i + 1 << " | " << setw(15) << Displacement[6 * (i - NFIN)] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 1] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 2] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 3] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 4] << " | " << setw(15) << Displacement[6 * (i - NFIN) + 5] << " |\n";
sfPrintLine();
cout << "| Sections | Axial force-X | Shear force-Y | Shear force-Z | Torque-X | Bending-Y | Bending-Z |\n";
for (int i = 0; i < NOS; i++)
cout << "| " << setw(15) << i + 1 << " | " << setw(15) << sections[i].IFS[0] << " | " << setw(15) << sections[i].IFS[1] << " | " << setw(15) << sections[i].IFS[2] << " | " << setw(15) << sections[i].IFS[3] << " | " << setw(15) << sections[i].IFS[4] << " | " << setw(15) << sections[i].IFS[5] << " |\n";
sfPrintLine();
}
if (true) // file
{
ofstream fout("source&result/sfResultClass.csv", ios::out);
fout << setw(80) << "Calculation Of Space Rigid Frame,\n";
fout << "TNN = " << setw(9) << TNN << " , NFIN = " << setw(8) << NFIN << " , NFRN = " << setw(8) << NFRN << " , NOR = " << setw(9) << NOR << " , NOL = " << setw(9) << NOL << " , NOS = " << setw(9) << NOS << " , ,\n";
fout << "Nodes , Coordinate-X , Coordinate-Y , Coordinate-Z , , ,\n";
for (int i = 0; i < TNN; i++)
fout << setw(15) << i + 1 << " , " << setw(15) << nodes[i].XCN << " , " << setw(15) << nodes[i].YCN << " , " << setw(15) << nodes[i].ZCN << " , , , ,\n";
fout << "Rods , Left - Right , Elastic Modulus , Shear modulus , Area , Inertia Y Axis , Inertia Z Axis ,\n";
for (int i = 0; i < NOR; i++)
fout << setw(15) << i + 1 << " , " << setw(6) << rods[i].BNR << " - " << left << setw(6) << rods[i].ENR << " , " << right << setw(15) << rods[i].ELASTIC << " , " << setw(15) << rods[i].SHEAR << " , " << setw(15) << rods[i].AREA << " , " << setw(15) << rods[i].IMY << " , " << setw(15) << rods[i].IMZ << " ,\n";
fout << "Sections , Rods , Distance , , , ,\n";
for (int i = 0; i < NOS; i++)
fout << setw(15) << i + 1 << " , " << setw(15) << sections[i].NRS << " , " << setw(15) << sections[i].DSB << " , , , , ,\n";
fout << "Nodes , Displacement-X , Displacement-Y , Displacement-Z , Diversion-X , Diversion-Y , Diversion-Z ,\n";
for (int i = NFIN; i < TNN; i++)
fout << setw(15) << i + 1 << " , " << setw(15) << Displacement[6 * (i - NFIN)] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 1] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 2] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 3] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 4] << " , " << setw(15) << Displacement[6 * (i - NFIN) + 5] << " ,\n";
fout << "Sections , Axial force-X , Shear force-Y , Shear force-Z , Torque-X , Bending-Y , Bending-Z ,\n";
for (int i = 0; i < NOS; i++)
fout << setw(15) << i + 1 << " , " << setw(15) << sections[i].IFS[0] << " , " << setw(15) << sections[i].IFS[1] << " , " << setw(15) << sections[i].IFS[2] << " , " << setw(15) << sections[i].IFS[3] << " , " << setw(15) << sections[i].IFS[4] << " , " << setw(15) << sections[i].IFS[5] << " ,\n";
fout.close();
}
return 0;
}
bool SpaceFrame::sfCalculate(bool parallel = true, bool progress_bar = true, double eps = 1e-15)
{
ProgressBar = progress_bar, Parallel = parallel, EPS = eps;
if (sfLCosSin()) // calculate the length, cosine and sine of all rods
{
sfPrintError(6);
return 1;
}
else
cout << "Calculating length, cosine and sine succeed!\n";
if (sfAllocate())
{
sfPrintError(15);
return 0;
}
else
cout << "Allocating Variable Bandwith Matrix succeed!\n";
if (sfBuildTotalStiff()) // build total stiffness matrix
{
sfPrintError(2);
return 1;
}
else
cout << "Building total stiffness matrix succeeded!\n";
if (sfBuildLoadVector(LoadVector)) // build load stiffness vector
{
sfPrintError(3);
return 1;
}
else
cout << "Building load vector succeeded!\n";
if (Parallel)
{
if (sfConjugateGradientPar(TotalStiffness, LoadVector, Displacement, 6 * NFRN)) // solve matrix equation
{
sfPrintError(4);
return 1;
}
else
cout << "Solving equation succeeded!\n";
}
else
{
if (sfConjugateGradient(TotalStiffness, LoadVector, Displacement, 6 * NFRN)) // solve matrix equation
{
sfPrintError(4);
return 1;
}
else
cout << "Solving equation succeeded!\n";
}
for (int i = 0; i < NOS; i++)
if (sfInternalForce(i, sections[i].NRS, sections[i].DSB)) // calculate the internal force of each rods
{
sfPrintError(5);
return 1;
}
cout << "Outputing data succeed!\n";
return 0;
}
bool SpaceFrame::sfCircularStructure(int m, int n, int l)
{
ofstream fout("source&result/sf_test.csv", ios::out);
fout << "Stress Test, degree of freedom is " << ((m + 1) * (n + 1) * (l + 1) - (m + 1) * (n + 1)) * 6 << ",\n";
fout << "TNN," << (m + 1) * (n + 1) * (l + 1) << ",\n";
fout << "NFIN," << (m + 1) * (n + 1) << ",\n";
int nor = ((2 * m + 1) * (2 * n + 1) - m * n) * l;
fout << "NOR," << nor << ",\n";
fout << "NOL," << (m + 1) * (n + 1) << ",\n";
fout << "NOS," << (m + 1) * (n + 1) << ",\n";
fout << "XCN,";
for (int i = 0; i < l + 1; i++)
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m + 1; k++)
fout << k << ",";
fout << "\n";
fout << "YCN,";
for (int i = 0; i < l + 1; i++)
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m + 1; k++)
fout << j << ",";
fout << "\n";
fout << "ZCN,";
for (int i = 0; i < l + 1; i++)
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m + 1; k++)
fout << i << ",";
fout << "\n";
fout << "BNR,";
for (int i = 0; i < l; i++)
{
for (int j = 0; j < (m + 1) * (n + 1); j++)
fout << j + 1 + i * (m + 1) * (n + 1) << ",";
for (int j = 0; j < (m + 1) * n; j++)
fout << j + 1 + (i + 1) * (m + 1) * (n + 1) << ",";
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m; k++)
fout << k + 1 + j * (m + 1) + (i + 1) * (m + 1) * (n + 1) << ",";
}
fout << "\n";
fout << "ENR,";
for (int i = 0; i < l; i++)
{
for (int j = 0; j < (m + 1) * (n + 1); j++)
fout << j + 1 + (i + 1) * (m + 1) * (n + 1) << ",";
for (int j = 0; j < (m + 1) * n; j++)
fout << j + 1 + m + 1 + (i + 1) * (m + 1) * (n + 1) << ",";
for (int j = 0; j < n + 1; j++)
for (int k = 0; k < m; k++)
fout << k + 2 + j * (m + 1) + (i + 1) * (m + 1) * (n + 1) << ",";
}
fout << "\n";
fout << "ELASTIC,";
for (int i = 0; i < nor; i++)
fout << 210000000 + 100000 * (rand() % 1000) << ",";
fout << "\n";
fout << "SHEAR,";
for (int i = 0; i < nor; i++)
fout << 80769000 << ",";
fout << "\n";
fout << "AREA,";
for (int i = 0; i < nor; i++)
fout << 0.007854 << ",";
fout << "\n";
fout << "IMY,";
for (int i = 0; i < nor; i++)
fout << 0.0000040001 + 0.0000000001 * (rand() % 10000) << ",";
fout << "\n";
fout << "IMZ,";
for (int i = 0; i < nor; i++)
fout << 0.0000040001 + 0.0000000001 * (rand() % 10000) << ",";
fout << "\n";
fout << "THETA,";
for (int i = 0; i < nor; i++)
fout << 0 << ",";
fout << "\n";
fout << "NRL,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << i + 1 + ((2 * m + 1) * (2 * n + 1) - m * n) * (l - 1) << ",";
fout << "\n";
fout << "PLI,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << 0 << ",";
fout << "\n";
fout << "KOL,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << 3 << ",";
fout << "\n";
fout << "VOL,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << 1000 + rand() % 1000 << ",";
fout << "\n";
fout << "DLB,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << 1 << ",";
fout << "\n";
fout << "NRS,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << i + 1 << ",";
fout << "\n";
fout << "DSB,";
for (int i = 0; i < (m + 1) * (n + 1); i++)
fout << 0.5 << ",";
fout << "\nEND,";
fout.close();
return 0;
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/token-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
image->blur=1.0;
InitializeExceptionInfo(&image->exception);
(void) QueryColorDatabase(BackgroundColor,&image->background_color,
&image->exception);
(void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
(void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
(void) QueryColorDatabase(TransparentColor,&image->transparent_color,
&image->exception);
GetTimerInfo(&image->timer);
image->ping=MagickFalse;
image->cache=AcquirePixelCache(0);
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=GetMagickTime();
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AllocateSemaphoreInfo();
image->signature=MagickCoreSignature;
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MaxTextExtent);
(void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
if (image_info->size != (char *) NULL)
{
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
if ((flags & RhoValue) != 0)
image->x_resolution=geometry_info.rho;
image->y_resolution=image->x_resolution;
if ((flags & SigmaValue) != 0)
image->y_resolution=geometry_info.sigma;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->matte_color=image_info->matte_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
(void) SyncImageSettings(image_info,image);
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if ((double) image->delay > floor(geometry_info.rho+0.5))
image->delay=(size_t) CastDoubleToLong(floor(
geometry_info.rho+0.5));
}
else
if ((flags & LessValue) != 0)
{
if ((double) image->delay < floor(geometry_info.rho+0.5))
image->ticks_per_second=CastDoubleToLong(floor(
geometry_info.sigma+0.5));
}
else
image->delay=(size_t) CastDoubleToLong(floor(
geometry_info.rho+0.5));
if ((flags & SigmaValue) != 0)
image->ticks_per_second=CastDoubleToLong(floor(
geometry_info.sigma+0.5));
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info));
if (image_info == (ImageInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info);
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MaxTextExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MaxTextExtent);
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
matte,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
matte=images->matte;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
{
InheritException(exception,&append_image->exception);
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace);
append_image->depth=depth;
append_image->matte=matte;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict append_indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
for (x=0; x < (ssize_t) next->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (next->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if ((next->colorspace == CMYKColorspace) &&
(append_image->colorspace == CMYKColorspace))
SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
GetImageException(image,exception);
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
return(ClipImagePath(image,"#1",MagickTrue));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
property=AcquireString(pathname);
(void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask);
if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (inside == MagickFalse)
(void) NegateImage(clip_mask,MagickFalse);
(void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageClipMask(image,clip_mask);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
double
scale;
Image
*clone_image;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == 0) || (image->rows == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"NegativeOrZeroImageSize","`%s'",image->filename);
return((Image *) NULL);
}
clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
(void) memset(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickCoreSignature;
clone_image->storage_class=image->storage_class;
clone_image->channels=image->channels;
clone_image->colorspace=image->colorspace;
clone_image->matte=image->matte;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
InitializeExceptionInfo(&clone_image->exception);
InheritException(&clone_image->exception,&image->exception);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->extent=image->extent;
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MaxTextExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
(void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
clone_image->clip_mask=NewImageList();
clone_image->mask=NewImageList();
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AllocateSemaphoreInfo();
if (image->colormap != (PixelPacket *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelPacket *) NULL)
{
clone_image=DestroyImage(clone_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
if ((columns == 0) || (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
if (image->clip_mask != (Image *) NULL)
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
exception);
if (image->mask != (Image *) NULL)
clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
if ((columns == image->columns) && (rows == image->rows))
{
if (image->clip_mask != (Image *) NULL)
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
exception);
if (image->mask != (Image *) NULL)
clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
}
scale=1.0;
if (image->columns != 0)
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) CastDoubleToLong(floor(scale*
image->page.width+0.5));
clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5));
clone_image->tile_offset.x=CastDoubleToLong(ceil(scale*
image->tile_offset.x-0.5));
scale=1.0;
if (image->rows != 0)
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) CastDoubleToLong(floor(scale*
image->page.height+0.5));
clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5));
clone_image->tile_offset.y=CastDoubleToLong(ceil(scale*
image->tile_offset.y-0.5));
clone_image->cache=ClonePixelCache(image->cache);
if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
{
InheritException(exception,&clone_image->exception);
clone_image=DestroyImage(clone_image);
}
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
if (image_info->size != (char *) NULL)
(void) CloneString(&clone_info->size,image_info->size);
if (image_info->extract != (char *) NULL)
(void) CloneString(&clone_info->extract,image_info->extract);
if (image_info->scenes != (char *) NULL)
(void) CloneString(&clone_info->scenes,image_info->scenes);
if (image_info->page != (char *) NULL)
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
if (image_info->sampling_factor != (char *) NULL)
(void) CloneString(&clone_info->sampling_factor,
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,image_info->server_name);
if (image_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,image_info->font);
if (image_info->texture != (char *) NULL)
(void) CloneString(&clone_info->texture,image_info->texture);
if (image_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->pen=image_info->pen;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->matte_color=image_info->matte_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colors=image_info->colors;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->preview_type=image_info->preview_type;
clone_info->group=image_info->group;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
if (image_info->view != (char *) NULL)
(void) CloneString(&clone_info->view,image_info->view);
if (image_info->authenticate != (char *) NULL)
(void) CloneString(&clone_info->authenticate,image_info->authenticate);
(void) CloneImageOptions(clone_info,image_info);
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
(void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
(void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MaxTextExtent);
clone_info->subimage=image_info->scene; /* deprecated */
clone_info->subrange=image_info->number_scenes; /* deprecated */
clone_info->channel=image_info->channel;
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
const IndexPacket
*magick_restrict source_indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
source_indexes=GetCacheViewVirtualIndexQueue(source_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) geometry->width; x++)
{
*q=(*p);
if (image->colorspace == CMYKColorspace)
indexes[x]=source_indexes[x];
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
source_view=DestroyCacheView(source_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
if (image->clip_mask != (Image *) NULL)
image->clip_mask=DestroyImage(image->clip_mask);
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelPacket *) NULL)
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info*) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
DestroyBlob(image);
(void) ClearExceptionInfo(&image->exception,MagickTrue);
if (image->semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&image->semaphore);
image->signature=(~MagickCoreSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
if (image_info->authenticate != (char *) NULL)
image_info->authenticate=DestroyString(
image_info->authenticate);
DestroyImageOptions(image_info);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
image_info->signature=(~MagickCoreSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher then 1 a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->clip_mask == (Image *) NULL)
return((Image *) NULL);
return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
Image
*next;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->exception.severity == UndefinedException)
continue;
if (next->exception.severity > exception->severity)
InheritException(exception,&next->exception);
next->exception.severity=UndefinedException;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
char
*synchronize;
ExceptionInfo
*exception;
/*
File and image dimension members.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info != (ImageInfo *) NULL);
(void) memset(image_info,0,sizeof(*image_info));
image_info->adjoin=MagickTrue;
image_info->interlace=NoInterlace;
image_info->channel=DefaultChannels;
image_info->quality=UndefinedCompressionQuality;
image_info->antialias=MagickTrue;
image_info->dither=MagickTrue;
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
image_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
exception=AcquireExceptionInfo();
(void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
exception);
(void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
(void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
(void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
exception);
exception=DestroyExceptionInfo(exception);
image_info->debug=IsEventLogging();
image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->mask == (Image *) NULL)
return((Image *) NULL);
return(CloneImage(image->mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(image->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
ssize_t
reference_count;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
LockSemaphoreInfo(image->semaphore);
reference_count=image->reference_count;
UnlockSemaphoreInfo(image->semaphore);
return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info..
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
Image *image,const char *format,int value,char *filename)
{
char
*q;
const char
*p;
int
c;
MagickBooleanType
canonical;
ssize_t
field_width,
offset;
canonical=MagickFalse;
offset=0;
(void) CopyMagickString(filename,format,MaxTextExtent);
if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse)
return(strlen(filename));
for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
{
q=(char *) p+1;
if (*q == '%')
{
p=q+1;
continue;
}
field_width=0;
if (*q == '0')
field_width=(ssize_t) strtol(q,&q,10);
switch (*q)
{
case 'd':
case 'o':
case 'x':
{
q++;
c=(*q);
*q='\0';
(void) FormatLocaleString(filename+(p-format-offset),(size_t)
(MaxTextExtent-(p-format-offset)),p,value);
offset+=(4-field_width);
*q=c;
(void) ConcatenateMagickString(filename,q,MaxTextExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
case '[':
{
char
pattern[MaxTextExtent];
const char
*value;
char
*r;
ssize_t
i;
ssize_t
depth;
/*
Image option.
*/
if (strchr(p,']') == (char *) NULL)
break;
depth=1;
r=q+1;
for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
{
if (*r == '[')
depth++;
if (*r == ']')
depth--;
if (depth <= 0)
break;
pattern[i]=(*r++);
}
pattern[i]='\0';
if (LocaleNCompare(pattern,"filename:",9) != 0)
break;
value=(const char *) NULL;
if (image != (Image *) NULL)
value=GetImageProperty(image,pattern);
if ((value == (const char *) NULL) &&
(image != (Image *) NULL))
value=GetImageArtifact(image,pattern);
if ((value == (const char *) NULL) &&
(image_info != (ImageInfo *) NULL))
value=GetImageOption(image_info,pattern);
if (value == (const char *) NULL)
break;
q--;
c=(*q);
*q='\0';
(void) CopyMagickString(filename+(p-format-offset),value,(size_t)
(MaxTextExtent-(p-format-offset)));
offset+=strlen(pattern)-strlen(value)+3;
*q=c;
(void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
default:
break;
}
}
if (canonical == MagickFalse)
(void) CopyMagickString(filename,format,MaxTextExtent);
else
for (q=filename; *q != '\0'; q++)
if ((*q == '%') && (*(q+1) == '%'))
(void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535.
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
(void) image;
(void) exception;
return(MagickFalse);
#else
CacheView
*image_view;
MagickBooleanType
status;
MagickPixelPacket
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
const IndexPacket
*indexes;
const PixelPacket
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
(pixel.red != (QuantumAny) pixel.red))
break;
if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
(pixel.green != (QuantumAny) pixel.green))
break;
if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
(pixel.blue != (QuantumAny) pixel.blue))
break;
if (pixel.matte != MagickFalse)
{
if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
(pixel.opacity != (QuantumAny) pixel.opacity))
break;
}
if (pixel.colorspace == CMYKColorspace)
{
if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
(pixel.index != (QuantumAny) pixel.index))
break;
}
p++;
}
if (x < (ssize_t) image->columns)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
if (p->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
char
magick[MaxTextExtent],
filename[MaxTextExtent];
const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
(void) CopyMagickString(magick,image->magick,MaxTextExtent);
(void) CopyMagickString(filename,image->filename,MaxTextExtent);
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (p->taint != MagickFalse)
return(MagickTrue);
if (LocaleCompare(p->magick,magick) != 0)
return(MagickTrue);
if (LocaleCompare(p->filename,filename) != 0)
return(MagickTrue);
}
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
ExceptionInfo *exception)
{
Image
*clone_image;
assert(image != (Image **) NULL);
assert(*image != (Image *) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (GetImageReferenceCount(*image) <= 1)
return(MagickTrue);
clone_image=CloneImage(*image,0,0,MagickTrue,exception);
LockSemaphoreInfo((*image)->semaphore);
(*image)->reference_count--;
UnlockSemaphoreInfo((*image)->semaphore);
*image=clone_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
const size_t width,const size_t height,const MagickPixelPacket *background)
{
CacheView
*image_view;
ExceptionInfo
*exception;
Image
*image;
ssize_t
y;
MagickBooleanType
status;
assert(image_info != (const ImageInfo *) NULL);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info->signature == MagickCoreSignature);
assert(background != (const MagickPixelPacket *) NULL);
image=AcquireImage(image_info);
image->columns=width;
image->rows=height;
image->colorspace=background->colorspace;
image->matte=background->matte;
image->fuzz=background->fuzz;
image->depth=background->depth;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(image,background,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
LockSemaphoreInfo(image->semaphore);
image->reference_count++;
UnlockSemaphoreInfo(image->semaphore);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseAbsoluteGeometry(page,&geometry);
if ((flags & WidthValue) != 0)
{
if ((flags & HeightValue) == 0)
geometry.height=geometry.width;
image->page.width=geometry.width;
image->page.height=geometry.height;
}
if ((flags & AspectValue) != 0)
{
if ((flags & XValue) != 0)
image->page.x+=geometry.x;
if ((flags & YValue) != 0)
image->page.y+=geometry.y;
}
else
{
if ((flags & XValue) != 0)
{
image->page.x=geometry.x;
if ((image->page.width == 0) && (geometry.x > 0))
image->page.width=image->columns+geometry.x;
}
if ((flags & YValue) != 0)
{
image->page.y=geometry.y;
if ((image->page.height == 0) && (geometry.y > 0))
image->page.height=image->rows+geometry.y;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() reset the image pixels, that is, all the pixel components
% are zereod.
%
% The format of the SetImage method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const void
*pixels;
MagickBooleanType
status;
MagickSizeType
length;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
pixels=AcquirePixelCachePixels(image,&length,exception);
if (pixels != (void *) NULL)
{
/*
Reset in-core image pixels.
*/
(void) memset((void *) pixels,0,(size_t) length);
return(MagickTrue);
}
/*
Reset image pixels.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) memset(q,0,sizeof(PixelPacket));
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
indexes[x]=0;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImage method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
index;
MagickBooleanType
status;
MagickPixelPacket
background;
PixelPacket
pixel;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if ((IsPixelGray(&image->background_color) == MagickFalse) &&
(IsGrayColorspace(image->colorspace) != MagickFalse))
(void) TransformImageColorspace(image,RGBColorspace);
if ((image->background_color.opacity != OpaqueOpacity) &&
(image->matte == MagickFalse))
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
NULL,&background);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&background);
index=0;
pixel.opacity=OpaqueOpacity;
SetPixelPacket(image,&background,&pixel,&index);
/*
Set image background color.
*/
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
*q++=pixel;
if (image->colorspace == CMYKColorspace)
{
IndexPacket
*magick_restrict indexes;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,index);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixels channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
const size_t channels)
{
image->channels=channels;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
const MagickPixelPacket *color)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
assert(color != (const MagickPixelPacket *) NULL);
image->colorspace=color->colorspace;
image->matte=color->matte;
image->fuzz=color->fuzz;
image->depth=color->depth;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(image,color,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
const Image *clip_mask)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (clip_mask != (const Image *) NULL)
if ((clip_mask->columns != image->columns) ||
(clip_mask->rows != image->rows))
ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
if (image->clip_mask != (Image *) NULL)
image->clip_mask=DestroyImage(image->clip_mask);
image->clip_mask=NewImageList();
if (clip_mask == (Image *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
if (image->clip_mask == (Image *) NULL)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
const size_t rows)
{
if ((columns == 0) || (rows == 0))
ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
image->filename);
image->columns=columns;
image->rows=rows;
if (image->depth == 0)
{
image->depth=8;
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageError,"ImageDepthNotSupported","`%s'",image->filename);
}
if (image->depth > (8*sizeof(MagickSizeType)))
{
image->depth=8*sizeof(MagickSizeType);
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageError,"ImageDepthNotSupported","`%s'",image->filename);
}
return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precendence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
extension[MaxTextExtent],
filename[MaxTextExtent],
magic[MaxTextExtent],
*q,
subimage[MaxTextExtent];
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
const char
*p;
ssize_t
count;
unsigned char
magick[2*MaxTextExtent];
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*subimage='\0';
GetPathComponent(image_info->filename,SubimagePath,subimage);
if (*subimage != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
{
if (IsGeometry(subimage) != MagickFalse)
(void) CloneString(&image_info->extract,subimage);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,subimage);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
image_info->subimage=image_info->scene;
image_info->subrange=image_info->number_scenes;
}
}
*extension='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,extension);
if (*extension != '\0')
{
char
path[MaxTextExtent];
/*
Base path sans any compression extension.
*/
GetPathComponent(image_info->filename,BasePathSansCompressExtension,path);
GetPathComponent(path,ExtensionPath,extension);
}
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if ((*extension != '\0') && (IsGlob(extension) == MagickFalse))
{
MagickFormatType
format_type;
ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,extension,MaxTextExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
{
(void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
magick_info=GetMagickInfo(magic,sans_exception);
if (frames == 0)
GetPathComponent(image_info->filename,CanonicalPath,filename);
else
GetPathComponent(image_info->filename,SubcanonicalPath,filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
}
else
{
const DelegateInfo
*delegate_info;
/*
User specified image format.
*/
LocaleUpper(magic);
magick_info=GetMagickInfo(magic,sans_exception);
delegate_info=GetDelegateInfo(magic,"*",sans_exception);
if (delegate_info == (const DelegateInfo *) NULL)
delegate_info=GetDelegateInfo("*",magic,sans_exception);
if (((magick_info != (const MagickInfo *) NULL) ||
(delegate_info != (const DelegateInfo *) NULL)) &&
(IsMagickConflict(magic) == MagickFalse))
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
GetPathComponent(image_info->filename,CanonicalPath,filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
}
}
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,filename);
if ((LocaleCompare(filename,image_info->filename) != 0) &&
(strchr(filename,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
/*
Determine the image format from the first few bytes of the file.
*/
image=AcquireImage(image_info);
(void) CopyMagickString(image->filename,image_info->filename,
MaxTextExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy image to a seekable temporary file.
*/
*filename='\0';
status=ImageToFile(image,filename,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
(void) RelinquishUniqueFileResource(filename);
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,filename,MaxTextExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
(void) RelinquishUniqueFileResource(filename);
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
image_info->temporary=MagickTrue;
}
(void) memset(magick,0,sizeof(magick));
count=ReadBlob(image,2*MaxTextExtent,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic.xml configuration file.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
(void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
MaxTextExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
const size_t length)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->blob=(void *) blob;
image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (mask != (const Image *) NULL)
if ((mask->columns != image->columns) || (mask->rows != image->rows))
ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=NewImageList();
if (mask == (Image *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
if (image->mask == (Image *) NULL)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
const Quantum opacity)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->matte=MagickTrue;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,opacity);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
const VirtualPixelMethod virtual_pixel_method)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
const PixelPacket
*p;
ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
const PixelPacket
*p;
ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(GetPixelOpacity(p) != TransparentOpacity) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
CacheView
*smush_view;
const Image
*image;
Image
*smush_image;
MagickBooleanType
matte,
proceed,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute maximum area of smushed area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
matte=image->matte;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
{
InheritException(exception,&smush_image->exception);
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->matte=matte;
(void) SetImageBackgroundColor(smush_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
smush_view=AcquireVirtualCacheView(smush_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
smush_view=DestroyCacheView(smush_view);
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
DestroyImageProfiles(image);
(void) DeleteImageProperty(image,"comment");
(void) DeleteImageProperty(image,"date:create");
(void) DeleteImageProperty(image,"date:modify");
status=SetImageArtifact(image,"png:exclude-chunk",
"bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
const size_t index,MagickBooleanType *range_exception)
{
if (index < image->colors)
return((IndexPacket) index);
*range_exception=MagickTrue;
return((IndexPacket) 0);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelPacket *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
index;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
&range_exception);
if (image->matte == MagickFalse)
SetPixelRgb(q,image->colormap+(ssize_t) index)
else
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image)
{
char
property[MaxTextExtent];
const char
*option,
*value;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->background_color,
&image->exception);
option=GetImageOption(image_info,"bias");
if (option != (const char *) NULL)
image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
if ((flags & RhoValue) != 0)
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
if ((flags & SigmaValue) != 0)
image->chromaticity.blue_primary.y=geometry_info.sigma;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->border_color,&image->exception);
option=GetImageOption(image_info,"colors");
if (option != (const char *) NULL)
image->colors=StringToUnsignedLong(option);
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
/*
Set image density.
*/
flags=ParseGeometry(option,&geometry_info);
if ((flags & RhoValue) != 0)
image->x_resolution=geometry_info.rho;
image->y_resolution=image->x_resolution;
if ((flags & SigmaValue) != 0)
image->y_resolution=geometry_info.sigma;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
if ((flags & RhoValue) != 0)
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
if ((flags & SigmaValue) != 0)
image->chromaticity.green_primary.y=geometry_info.sigma;
}
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->matte_color,&image->exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
if ((flags & RhoValue) != 0)
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
if ((flags & SigmaValue) != 0)
image->chromaticity.red_primary.y=geometry_info.sigma;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->transparent_color,
&image->exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->x_resolution=(double) ((size_t) (100.0*2.54*
image->x_resolution+0.5))/100.0;
image->y_resolution=(double) ((size_t) (100.0*2.54*
image->y_resolution+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
if ((flags & RhoValue) != 0)
image->x_resolution=geometry_info.rho;
image->y_resolution=image->x_resolution;
if ((flags & SigmaValue) != 0)
image->y_resolution=geometry_info.sigma;
}
}
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
if ((flags & RhoValue) != 0)
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
if ((flags & SigmaValue) != 0)
image->chromaticity.white_point.y=geometry_info.sigma;
}
ResetImageOptionIterator(image_info);
for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
{
value=GetImageOption(image_info,option);
if (value != (const char *) NULL)
{
(void) FormatLocaleString(property,MaxTextExtent,"%s",option);
(void) SetImageArtifact(image,property,value);
}
option=GetNextImageOption(image_info);
}
return(MagickTrue);
}
|
blas_server_omp.c | /*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"
#ifndef USE_OPENMP
#include "blas_server.c"
#else
int blas_server_avail = 0;
static void * blas_thread_buffer[MAX_CPU_NUMBER];
void goto_set_num_threads(int num_threads) {
int i=0;
if (num_threads < 1) num_threads = blas_num_threads;
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
if (num_threads > blas_num_threads) {
blas_num_threads = num_threads;
}
blas_cpu_number = num_threads;
omp_set_num_threads(blas_cpu_number);
//adjust buffer for each thread
for(i=0; i<blas_cpu_number; i++){
if(blas_thread_buffer[i]==NULL){
blas_thread_buffer[i]=blas_memory_alloc(2);
}
}
for(; i<MAX_CPU_NUMBER; i++){
if(blas_thread_buffer[i]!=NULL){
blas_memory_free(blas_thread_buffer[i]);
blas_thread_buffer[i]=NULL;
}
}
#if defined(ARCH_MIPS64)
//set parameters for different number of threads.
blas_set_parameter();
#endif
}
void openblas_set_num_threads(int num_threads) {
goto_set_num_threads(num_threads);
}
int blas_thread_init(void){
int i=0;
blas_get_cpu_number();
blas_server_avail = 1;
for(i=0; i<blas_num_threads; i++){
blas_thread_buffer[i]=blas_memory_alloc(2);
}
for(; i<MAX_CPU_NUMBER; i++){
blas_thread_buffer[i]=NULL;
}
return 0;
}
int BLASFUNC(blas_thread_shutdown)(void){
int i=0;
blas_server_avail = 0;
for(i=0; i<MAX_CPU_NUMBER; i++){
if(blas_thread_buffer[i]!=NULL){
blas_memory_free(blas_thread_buffer[i]);
blas_thread_buffer[i]=NULL;
}
}
return 0;
}
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (mode & BLAS_XDOUBLE){
/* REAL / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if (mode & BLAS_DOUBLE){
/* REAL / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* REAL / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
}
} else {
#ifdef EXPRECISION
if (mode & BLAS_XDOUBLE){
/* COMPLEX / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
((xdouble *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if (mode & BLAS_DOUBLE){
/* COMPLEX / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
((double *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* COMPLEX / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
((float *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
}
}
}
static void exec_threads(blas_queue_t *queue){
void *buffer, *sa, *sb;
int pos=0, release_flag=0;
buffer = NULL;
sa = queue -> sa;
sb = queue -> sb;
#ifdef CONSISTENT_FPCSR
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {
pos = omp_get_thread_num();
buffer = blas_thread_buffer[pos];
//fallback
if(buffer==NULL) {
buffer = blas_memory_alloc(2);
release_flag=1;
}
if (sa == NULL) {
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
queue->sa=sa;
}
if (sb == NULL) {
if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
} else {
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
}
queue->sb=sb;
}
}
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
} else
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
}
if (release_flag) blas_memory_free(buffer);
}
int exec_blas(BLASLONG num, blas_queue_t *queue){
BLASLONG i;
if ((num <= 0) || (queue == NULL)) return 0;
#ifdef CONSISTENT_FPCSR
for (i = 0; i < num; i ++) {
__asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
}
#endif
#pragma omp parallel for schedule(static)
for (i = 0; i < num; i ++) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
queue[i].position = i;
#endif
exec_threads(&queue[i]);
}
return 0;
}
#endif
|
sufsort_priv.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <cub/cub.cuh>
#include <mgpuhost.cuh>
#include <moderngpu.cuh>
#include <nvbio/strings/string_set.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/basic/cuda/sort.h>
#include <nvbio/basic/cuda/timer.h>
#include <nvbio/basic/cuda/ldg.h>
#include <nvbio/basic/cuda/primitives.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#if defined(PLATFORM_X86)
#include <emmintrin.h> // SSE intrinsics
#endif
namespace nvbio {
namespace priv {
template <uint32 BITS> struct word_selector {};
template <> struct word_selector<4> { typedef uint8 type; };
template <> struct word_selector<6> { typedef uint8 type; };
template <> struct word_selector<8> { typedef uint8 type; };
template <> struct word_selector<10> { typedef uint8 type; };
template <> struct word_selector<12> { typedef uint16 type; };
template <> struct word_selector<14> { typedef uint16 type; };
template <> struct word_selector<16> { typedef uint16 type; };
template <> struct word_selector<18> { typedef uint16 type; };
template <> struct word_selector<20> { typedef uint32 type; };
template <> struct word_selector<22> { typedef uint32 type; };
template <> struct word_selector<24> { typedef uint32 type; };
template <> struct word_selector<26> { typedef uint32 type; };
template <> struct word_selector<28> { typedef uint32 type; };
template <> struct word_selector<30> { typedef uint32 type; };
template <> struct word_selector<32> { typedef uint32 type; };
template <> struct word_selector<48> { typedef uint64 type; };
template <> struct word_selector<64> { typedef uint64 type; };
typedef ConcatenatedStringSet<
PackedStream<uint32*,uint8,2u,false,uint64>,
uint64*> string_set_2bit;
typedef ConcatenatedStringSet<
PackedStream<uint32*,uint8,2u,false,uint64>,
uint64*> string_set_4bit;
typedef ConcatenatedStringSet<
PackedStream<uint32*,uint8,8u,false,uint64>,
uint64*> string_set_8bit;
typedef ConcatenatedStringSet<
PackedStream<uint32*,uint8,2u,true,uint64>,
uint64*> string_set_2bit_be;
typedef ConcatenatedStringSet<
PackedStream<uint64*,uint8,2u,true,uint64>,
uint64*> string_set_2bit_u64_be;
typedef PackedStream<uint32*,uint8,2u,false,uint64> string_2bit_le;
typedef PackedStream<uint32*,uint8,4u,false,uint64> string_4bit_le;
typedef PackedStream<uint32*,uint8,8u,false,uint64> string_8bit_le;
typedef PackedStream<uint32*,uint8,2u,true,uint64> string_2bit_be;
typedef PackedStream<uint32*,uint8,4u,true,uint64> string_4bit_be;
typedef PackedStream<uint32*,uint8,8u,true,uint64> string_8bit_be;
void extract_radices(
const priv::string_set_2bit_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint32 word_bits,
const uint2* suffixes,
uint32* radices,
uint8* symbols = NULL);
void extract_radices(
const priv::string_set_2bit_u64_be string_set,
const uint32 n_suffixes,
const uint32 word_begin,
const uint32 word_end,
const uint32 word_bits,
const uint2* suffixes,
uint64* radices,
uint8* symbols = NULL);
// make sure a given buffer is big enough
//
template <typename VectorType>
void alloc_storage(VectorType& vec, const uint64 size)
{
if (vec.size() < size)
{
try
{
vec.clear();
vec.resize( size );
}
catch (...)
{
log_error(stderr,"alloc_storage() : allocation failed!\n");
throw;
}
}
}
/// set the last n bits to 0
///
template <typename storage_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
storage_type clearmask(const uint32 n) { return ~((storage_type(1u) << n)-1u); }
/// A functor to cast from one type into another
///
struct in_range_functor
{
typedef uint32 argument_type;
typedef bool result_type;
/// constructor
///
in_range_functor(const uint32 _begin, const uint32 _end) : begin(_begin), end(_end) {}
/// return true if i is in the range
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator() (const uint32 i) const { return i >= begin && i < end; }
const uint32 begin, end;
};
/// A functor subtracting the second element of a pair from the first
///
struct minus_one
{
typedef uint32 argument_type;
typedef uint32 result_type;
/// return the length of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint32 i) const { return i - 1; }
};
/// A functor adding the given constant to all intergers
///
struct offset_functor
{
typedef uint32 argument_type;
typedef uint32 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
offset_functor(const uint32 _offset) : offset(_offset) {}
/// return the length of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint32 i) const { return i + offset; }
const uint32 offset;
};
/// A functor dividing all integers by the given constant
///
struct add_divide_functor
{
typedef uint32 argument_type;
typedef uint32 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
add_divide_functor(const uint32 _a, const uint32 _k) : a(_a), k(_k) {}
/// return the length of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint32 i) const { return (i + a) / k; }
const uint32 a;
const uint32 k;
};
/// A functor fetching the length of the i-th string in a set
///
template <typename string_set_type>
struct length_functor
{
typedef uint32 argument_type;
typedef uint32 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
length_functor(const string_set_type _string_set, const bool _extended) : string_set(_string_set), extended(_extended) {}
/// return the length of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint32 i) const
{
return string_set[i].length() + (extended ? 1u : 0u);
}
string_set_type string_set;
bool extended;
};
/// A functor adding the given constant to the string id of a suffix
///
struct suffix_offset_functor
{
typedef uint2 argument_type;
typedef uint2 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
suffix_offset_functor(const uint32 _offset) : offset(_offset) {}
/// return the length of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint2 operator() (const uint2 suffix) const { return make_uint2( suffix.x, suffix.y + offset ); }
const uint32 offset;
};
/// A functor returning the given component of a suffix
///
enum SuffixComponent
{
SUFFIX_ID = 0,
STRING_ID = 1
};
template <SuffixComponent COMP>
struct suffix_component_functor
{
typedef uint2 argument_type;
typedef uint32 result_type;
/// return the length of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const uint2 suffix) const { return COMP == STRING_ID ? suffix.y : suffix.x; }
};
template <uint32 WORD_BITS, uint32 DOLLAR_BITS, uint32 SYMBOL_SIZE, typename string_type, typename index_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 extract_word_generic(
const string_type string,
const index_type string_len,
const index_type suffix_idx,
const uint32 w)
{
const uint32 SYMBOLS_PER_WORD = uint32(WORD_BITS - DOLLAR_BITS)/SYMBOL_SIZE;
const uint32 SYMBOL_OFFSET = uint32(WORD_BITS) - SYMBOL_SIZE;
uint32 word = 0u;
for (uint32 j = 0; j < SYMBOLS_PER_WORD; ++j)
{
const index_type jj = suffix_idx + w*SYMBOLS_PER_WORD + j;
const uint32 c = jj < string_len ? string[jj] : 0u;
word |= (c << (SYMBOL_OFFSET - j*SYMBOL_SIZE));
}
if (DOLLAR_BITS)
{
// encode the dollar's position in the least significant bits of the word
const uint32 dollar_offset =
string_len <= suffix_idx + w*SYMBOLS_PER_WORD + SYMBOLS_PER_WORD ? // is there a dollar sign?
(string_len < suffix_idx + w*SYMBOLS_PER_WORD) ? 0u :
uint32(string_len - suffix_idx - w*SYMBOLS_PER_WORD) :
(1u << DOLLAR_BITS)-1u; // no dollar sign in this word
return word | dollar_offset;
}
else
return word;
}
/// return how many symbols are encoded per word
///
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 symbols_per_word()
{
const uint32 SYMBOLS_PER_WORD = (WORD_BITS - DOLLAR_BITS)/SYMBOL_SIZE;
return SYMBOLS_PER_WORD;
}
template <uint32 WORD_BITS, uint32 DOLLAR_BITS, uint32 SYMBOL_SIZE, typename storage_type, typename index_type, typename sufindex_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
typename std::iterator_traits<storage_type>::value_type extract_word_packed(
const storage_type base_words,
const index_type string_len,
const index_type string_off,
const sufindex_type suffix_idx,
const uint32 w)
{
typedef typename std::iterator_traits<storage_type>::value_type word_type;
const uint32 STORAGE_BITS = uint32( 8u * sizeof(word_type) );
const uint32 STORAGE_SYMBOLS = STORAGE_BITS / SYMBOL_SIZE;
const uint32 SYMBOLS_PER_WORD = uint32(WORD_BITS - DOLLAR_BITS)/SYMBOL_SIZE;
//const uint32 SYMBOL_OFFSET = uint32(WORD_BITS) - SYMBOL_SIZE;
const sufindex_type suffix_off = suffix_idx + w*SYMBOLS_PER_WORD; // suffix offset
// do we have any symbols to encode?
if (suffix_off >= string_len)
return 0u;
const index_type range_len = string_len - suffix_off; // partial suffix length
const index_type range_off = string_off + suffix_off; // partial suffix offset
const uint32 n_symbols = (uint32)nvbio::min( range_len, index_type(SYMBOLS_PER_WORD) ); // symbols to pack
//
// As SYMBOLS_PER_WORD is less than 32, we know that the symbols we are looking for
// will span at most 2 32-bit words.
// Of the n_symbols we want to read, there might be n1 in the first word, and n2 in the
// second word.
// As the beginning of our symbol stream (range_off) might stride the 32-bit word boundary,
// the highest m1 = range_off % 16 symbols of the first word might have to be discarded,
// and we'll find our n1 symbols in the following position:
//
// |-------------------------------------------|
// |* * * * * *| x x x x x x x x x | * * * * * |
// |-------------------------------------------|
// | m1 | n1 | r1 |
//
// What we do is shifting the n1 symbols to the top of the 32-bit word (i.e. m1 to the left).
// Clearing the remaining symbols is only needed if n1 == n_symbols; if n1 < n_symbols, r1 will
// be necessarily zero.
//
// At this point, we might have n2 more symbols to read in the highest bits of the second word:
//
// |-------------------------------------------|
// | y y y y y y y y | * * * * * * * * * * * * |
// |-------------------------------------------|
// | n2 | r2 |
//
// which we need to shift right by (n1*SYMBOL_SIZE) bits.
// At the very end, we'll shift everything right by (32 - WORD_BITS) bits in order to have
// our output tightly packed in the lowest WORD_BITS:
//
// 32 WORD_BITS DOLLAR_BITS 0
// |-----------|-----------------------|-------|
// | * * * * * | x x x x | y y y | 0 0 | $ $ $ | // notice the possible presence of 0's before
// |-------------------------------------------| // the $ sign: these are bits that need to be
// | | n1 | n2 | | | // cleared if the suffix is short
//
const uint32 k1 = uint32( range_off/STORAGE_SYMBOLS ); // index of the first word
const uint32 m1 = range_off & (STORAGE_SYMBOLS-1); // offset in the word
const uint32 r1 = STORAGE_SYMBOLS - m1; // symbols to read
const word_type word1 = (base_words[ k1 ] << (m1*SYMBOL_SIZE)); // fetch the first word, shifted left
word_type word = word1;
if (n_symbols > r1) // do we need to read another word?
{
const word_type word2 = base_words[ k1+1u ]; // fetch the second word
word |= word2 >> (r1*SYMBOL_SIZE); // shift by n1 symbols to the right
}
word >>= (STORAGE_BITS - WORD_BITS); // align the top to WORD_BITS
// clear every symbol we don't need among the word's LSD
word &= clearmask<word_type>( WORD_BITS - n_symbols*SYMBOL_SIZE );
if (DOLLAR_BITS)
{
// encode the dollar's position in the least significant bits of the word
const word_type dollar_offset =
range_len <= SYMBOLS_PER_WORD ? // is there a dollar sign?
range_len :
(1u << DOLLAR_BITS)-1u; // no dollar sign in this word
return word | dollar_offset;
}
else
return word;
}
template <uint32 WORD_BITS, uint32 DOLLAR_BITS, uint32 SYMBOL_SIZE, typename storage_type, typename index_type, typename sufindex_type, typename output_iterator>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void extract_word_packed(
const storage_type base_words,
const index_type string_len,
const index_type string_off,
const sufindex_type suffix_idx,
const uint32 word_begin,
const uint32 word_end,
output_iterator words)
{
typedef typename std::iterator_traits<storage_type>::value_type word_type;
const uint32 STORAGE_BITS = uint32( 8u * sizeof(word_type) );
const uint32 STORAGE_SYMBOLS = STORAGE_BITS / SYMBOL_SIZE;
const uint32 SYMBOLS_PER_WORD = uint32(WORD_BITS - DOLLAR_BITS)/SYMBOL_SIZE;
//const uint32 SYMBOL_OFFSET = uint32(WORD_BITS) - SYMBOL_SIZE;
sufindex_type suffix_off = suffix_idx + word_begin*SYMBOLS_PER_WORD; // suffix offset
index_type range_len = string_len - suffix_off; // partial suffix length
index_type range_off = string_off + suffix_off; // partial suffix offset
const uint32 cache_begin = uint32( range_off / STORAGE_SYMBOLS );
#if defined(PLATFORM_X86) && !defined(NVBIO_DEVICE_COMPILATION)
// use SSE to load all the words we need in a small cache
const uint32 SSE_WORDS = 16u / sizeof( word_type );
const uint32 cache_end = uint32( (range_off + (word_end - word_begin)*SYMBOLS_PER_WORD) / STORAGE_SYMBOLS );
__m128i sse_cache[8];
for (uint32 w = cache_begin; w < cache_end; w += SSE_WORDS)
sse_cache[ (w - cache_begin)/SSE_WORDS ] = _mm_loadu_si128( (const __m128i*)(base_words + w) );
const word_type* cached_words = (const word_type*)sse_cache;
#elif 0
const_cached_iterator<storage_type> cached_words( base_words + cache_begin );
#else
const storage_type cached_words = base_words + cache_begin;
#endif
for (uint32 w = word_begin; w < word_end; ++w)
{
// do we have any symbols to encode?
if (suffix_off >= string_len)
{
words[w - word_begin] = 0u;
continue;
}
const uint32 n_symbols = (uint32)nvbio::min( range_len, index_type(SYMBOLS_PER_WORD) ); // symbols to pack
//
// As SYMBOLS_PER_WORD is less than 32, we know that the symbols we are looking for
// will span at most 2 32-bit words.
// Of the n_symbols we want to read, there might be n1 in the first word, and n2 in the
// second word.
// As the beginning of our symbol stream (range_off) might stride the 32-bit word boundary,
// the highest m1 = range_off % 16 symbols of the first word might have to be discarded,
// and we'll find our n1 symbols in the following position:
//
// |-------------------------------------------|
// |* * * * * *| x x x x x x x x x | * * * * * |
// |-------------------------------------------|
// | m1 | n1 | r1 |
//
// What we do is shifting the n1 symbols to the top of the 32-bit word (i.e. m1 to the left).
// Clearing the remaining symbols is only needed if n1 == n_symbols; if n1 < n_symbols, r1 will
// be necessarily zero.
//
// At this point, we might have n2 more symbols to read in the highest bits of the second word:
//
// |-------------------------------------------|
// | y y y y y y y y | * * * * * * * * * * * * |
// |-------------------------------------------|
// | n2 | r2 |
//
// which we need to shift right by (n1*SYMBOL_SIZE) bits.
// At the very end, we'll shift everything right by (32 - WORD_BITS) bits in order to have
// our output tightly packed in the lowest WORD_BITS:
//
// 32 WORD_BITS DOLLAR_BITS 0
// |-----------|-----------------------|-------|
// | * * * * * | x x x x | y y y | 0 0 | $ $ $ | // notice the possible presence of 0's before
// |-------------------------------------------| // the $ sign: these are bits that need to be
// | | n1 | n2 | | | // cleared if the suffix is short
//
const uint32 k1 = uint32( range_off/STORAGE_SYMBOLS ) - cache_begin; // index of the first word
const uint32 m1 = range_off & (STORAGE_SYMBOLS-1); // offset in the word
const uint32 r1 = STORAGE_SYMBOLS - m1; // symbols left in the word
const word_type word1 = (cached_words[ k1 ] << (m1*SYMBOL_SIZE)); // fetch the first word, shifted left
word_type word = word1;
if (n_symbols > r1) // do we need to read another word?
{
const word_type word2 = cached_words[ k1+1u ]; // fetch the second word
word |= word2 >> (r1*SYMBOL_SIZE); // shift by n1 symbols to the right
}
word >>= (STORAGE_BITS - WORD_BITS); // align the top to WORD_BITS
// clear every symbol we don't need among the word's LSD
word &= clearmask<word_type>( WORD_BITS - n_symbols*SYMBOL_SIZE );
if (DOLLAR_BITS)
{
// encode the dollar's position in the least significant bits of the word
const word_type dollar_offset =
range_len <= SYMBOLS_PER_WORD ? // is there a dollar sign?
range_len :
(1u << DOLLAR_BITS)-1u; // no dollar sign in this word
word |= dollar_offset;
}
// write the word out
words[ w - word_begin ] = word;
suffix_off += SYMBOLS_PER_WORD;
range_len -= SYMBOLS_PER_WORD;
range_off += SYMBOLS_PER_WORD;
}
}
/// A functor to localize suffixes, making the conversion: global-suffix-id -> (string-id,suffix-id)
///
struct localize_suffix_functor
{
typedef uint32 argument_type;
typedef uint2 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
localize_suffix_functor(const uint32* _cum_lengths, const uint32* _string_ids, const uint32 _string_offset = 0u) :
cum_lengths(_cum_lengths),
string_ids(_string_ids),
string_offset( _string_offset ) {}
/// return the localized suffix
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint32 global_suffix_idx) const
{
const uint32 string_idx = string_ids[ global_suffix_idx ];
const uint32 suffix_idx = global_suffix_idx - (string_idx ? cum_lengths[ string_idx-1u ] : 0u);
return make_uint2( suffix_idx, string_offset + string_idx );
}
const uint32* cum_lengths;
const uint32* string_ids;
const uint32 string_offset;
};
/// A functor fetching the w'th word worth of 2-bit symbols from the i-th string in a set
///
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename string_set_type, typename word_type>
struct local_set_suffix_word_functor
{
typedef uint2 argument_type;
typedef word_type result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
local_set_suffix_word_functor(const string_set_type _string_set, const uint32 _w) :
string_set(_string_set),
w(_w) {}
/// return the w'th word of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint2 local_suffix_idx) const
{
typedef typename string_set_type::string_type string_type;
const uint32 string_idx = local_suffix_idx.y;
const uint32 suffix_idx = local_suffix_idx.x;
const string_type string = string_set[string_idx];
const uint32 string_len = string.length();
return result_type( extract_word_generic<WORD_BITS,DOLLAR_BITS,SYMBOL_SIZE>(
string,
string_len,
suffix_idx,
w ) );
}
string_set_type string_set;
uint32 w;
};
/// A functor fetching the w'th word worth of 2-bit symbols from the given (string,suffix) in a set
///
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename storage_type, typename word_type, typename offsets_iterator>
struct local_set_suffix_word_functor<
SYMBOL_SIZE, WORD_BITS, DOLLAR_BITS,
ConcatenatedStringSet<
PackedStream<storage_type,uint8,SYMBOL_SIZE,true,typename std::iterator_traits<offsets_iterator>::value_type>,
offsets_iterator>,
word_type>
{
typedef typename std::iterator_traits<offsets_iterator>::value_type index_type;
typedef ConcatenatedStringSet<
PackedStream<storage_type,uint8,SYMBOL_SIZE,true,index_type>,
offsets_iterator> string_set_type;
typedef uint2 argument_type;
typedef word_type result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
local_set_suffix_word_functor(const string_set_type _string_set, const uint32 _w) :
string_set(_string_set),
w(_w) {}
/// return the w'th word of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint2 local_suffix_idx) const
{
typedef typename string_set_type::string_type string_type;
const uint32 string_idx = local_suffix_idx.y;
const uint32 suffix_idx = local_suffix_idx.x;
const index_type string_off = string_set.offsets()[ string_idx ];
const index_type string_end = string_set.offsets()[ string_idx+1u ];
const index_type string_len = uint32( string_end - string_off );
const storage_type base_words = string_set.base_string().stream();
return result_type( extract_word_packed<WORD_BITS,DOLLAR_BITS,SYMBOL_SIZE>(
base_words,
string_len,
string_off,
suffix_idx,
w ) );
}
string_set_type string_set;
uint32 w;
};
/// A functor fetching the w'th word worth of 2-bit symbols from the i-th suffix in a set
///
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename string_set_type, typename word_type>
struct global_set_suffix_word_functor
{
typedef uint32 argument_type;
typedef word_type result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
global_set_suffix_word_functor(const string_set_type _string_set, const uint32* _cum_lengths, const uint32* _string_ids, const uint32 _w) :
word_functor( _string_set, _w ),
localizer( _cum_lengths, _string_ids ) {}
/// return the w'th word of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint32 global_suffix_idx) const
{
return word_functor( localizer( global_suffix_idx ) );
}
local_set_suffix_word_functor<SYMBOL_SIZE,WORD_BITS,DOLLAR_BITS,string_set_type,word_type> word_functor;
localize_suffix_functor localizer;
};
/// A functor fetching the w'th word worth of 2-bit symbols from the i-th string in a set
///
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename string_type, typename word_type>
struct string_suffix_word_functor
{
typedef uint32 argument_type;
typedef word_type result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
string_suffix_word_functor(const uint64 _string_len, const string_type _string, const uint32 _w) :
string_len(_string_len),
string(_string),
w(_w) {}
/// return the w'th word of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint64 suffix_idx) const
{
return result_type( extract_word_generic<WORD_BITS,DOLLAR_BITS,SYMBOL_SIZE>(
string,
string_len,
suffix_idx,
w ) );
}
const uint64 string_len;
string_type string;
uint32 w;
};
/// A functor fetching the w'th word worth of 2-bit symbols from the i-th string in a set
///
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename storage_type, typename symbol_type, typename index_type, typename word_type>
struct string_suffix_word_functor<
SYMBOL_SIZE, WORD_BITS, DOLLAR_BITS,
PackedStream<storage_type,symbol_type,SYMBOL_SIZE,true,index_type>,
word_type>
{
typedef typename PackedStream<storage_type,symbol_type,SYMBOL_SIZE,true,index_type>::iterator string_type;
typedef uint2 argument_type;
typedef word_type result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
string_suffix_word_functor(const index_type _string_len, const string_type _string, const uint32 _w) :
string_len(_string_len),
string(_string),
w(_w) {}
/// return the w'th word of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const index_type suffix_idx) const
{
const storage_type base_words = string.stream();
return result_type( extract_word_packed<WORD_BITS,DOLLAR_BITS,SYMBOL_SIZE>(
base_words,
string_len,
string.index(),
suffix_idx,
w ) );
}
const index_type string_len;
string_type string;
uint32 w;
};
/// A binary functor calculating whether two suffixes differ (returning 1) or not (returning 0)
///
template <typename string_type>
struct string_suffix_difference
{
typedef uint32 first_argument_type;
typedef uint32 second_argument_type;
typedef uint32 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
string_suffix_difference(const uint64 _string_len, const string_type _string, const uint32 _cmp_len) :
string_len(_string_len),
string(_string),
cmp_len( _cmp_len ) {}
/// return the w'th word of the i-th string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint64 suffix_idx1, const uint64 suffix_idx2) const
{
// if one of the two suffixes is less than cmp_len, then the two suffixes must
// necessarily differ (because no two suffixes have the same length)
if (string_len - suffix_idx1 < cmp_len ||
string_len - suffix_idx2 < cmp_len)
return 1u;
for (uint32 i = 0; i < cmp_len; ++i)
{
if (string[suffix_idx1 + i] != string[suffix_idx2 + i])
return 1u;
}
return 0u;
}
const uint64 string_len;
string_type string;
uint32 cmp_len;
};
/// A binary functor comparing two suffixes lexicographically
///
template <uint32 SYMBOL_SIZE, typename string_type>
struct string_suffix_less
{
typedef uint32 first_argument_type;
typedef uint32 second_argument_type;
typedef uint32 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
string_suffix_less(const uint64 _string_len, const string_type _string) :
string_len(_string_len),
string(_string) {}
/// return true if the first suffix is lexicographically smaller than the second, false otherwise
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint64 suffix_idx1, const uint64 suffix_idx2) const
{
const uint32 WORD_BITS = 32u; // use 32-bit words
const uint32 DOLLAR_BITS = 4u; // 4 is the minimum number needed to encode up to 16 symbols per word
const uint32 SYMBOLS_PER_WORD = symbols_per_word<SYMBOL_SIZE,WORD_BITS,DOLLAR_BITS>();
const uint32 n_words = uint32( nvbio::min(
(string_len - suffix_idx1),
(string_len - suffix_idx2) ) + SYMBOLS_PER_WORD-1 ) / SYMBOLS_PER_WORD;
// loop through all string-words
for (uint32 w = 0; w < n_words; ++w)
{
string_suffix_word_functor<SYMBOL_SIZE,WORD_BITS,DOLLAR_BITS,string_type,uint32> word_functor( string_len, string, w );
const uint32 w1 = word_functor( suffix_idx1 );
const uint32 w2 = word_functor( suffix_idx2 );
if (w1 < w2) return true;
if (w1 > w2) return false;
}
return false;
}
const uint64 string_len;
string_type string;
};
/// given a string, return the symbol preceding each of its suffixes, or 255u to mark the
/// special $ symbol used for the first suffix.
///
template <typename string_type>
struct string_bwt_functor
{
typedef uint64 argument_type;
typedef uint8 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
string_bwt_functor(const uint64 _string_len, const string_type _string) :
string_len(_string_len),
string(_string) {}
/// return the symbol preceding the given suffix
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const argument_type suffix_idx) const
{
return suffix_idx ? string[suffix_idx-1] : 255u; // use 255u to mark the dollar sign
}
const uint64 string_len;
const string_type string;
};
/// given a string set, return the symbol preceding each of its suffixes, or 255u to mark the
/// special $ symbol used for the first suffix.
///
template <typename string_set_type>
struct string_set_bwt_functor
{
typedef uint2 argument_type;
typedef uint8 result_type;
/// constructor
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
string_set_bwt_functor(const string_set_type _string_set) :
string_set(_string_set) {}
/// return the symbol preceding the given suffix
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const argument_type local_suffix_idx) const
{
typedef typename string_set_type::string_type string_type;
const uint32 string_idx = local_suffix_idx.y;
const uint32 suffix_idx = local_suffix_idx.x;
const string_type string = string_set[string_idx];
return suffix_idx ? string[suffix_idx-1] : 255u; // use 255u to mark the dollar sign
}
/// return the last symbol of a given string
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint32 string_idx) const
{
typedef typename string_set_type::string_type string_type;
const string_type string = string_set[string_idx];
return string[ string.length()-1 ];
}
const string_set_type string_set;
};
/// A binary functor implementing some custom logic to remove singletons from a set of segment-flags
///
struct remove_singletons
{
typedef uint32 first_argument_type;
typedef uint32 second_argument_type;
typedef uint32 result_type;
/// functor operator
///
/// \return flag1 && flag2 ? 0 : 1
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const uint32 flag1, const uint32 flag2) const
{
return (flag1 && flag2) ? 0u : 1u;
}
};
/// A binary functor to merge keys with new radices
///
struct merge_keys
{
typedef uint32 first_argument_type;
typedef uint64 second_argument_type;
typedef uint64 result_type;
/// functor operator
///
/// \return (key << 32u) | radix
///
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
result_type operator() (const first_argument_type radix, const second_argument_type key) const
{
return (key << 32u) | second_argument_type( radix );
}
};
/*
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename string_set_type>
struct dispatch_set_suffix_radices
{
template <typename radix_iterator>
void enact(
const string_set_type& string_set,
const SetSuffixFlattener<SYMBOL_SIZE>& set_flattener,
radix_iterator radices)
{
typedef typename std::iterator_traits<radix_iterator>::value_type word_type;
thrust::transform(
thrust::make_counting_iterator(0u),
thrust::make_counting_iterator(0u) + set_flattener.n_suffixes,
radices,
global_set_suffix_word_functor<SYMBOL_SIZE,BITS,DOLLAR_BITS,string_set_type,word_type>(
string_set,
nvbio::device_view( set_flattener.cum_lengths ),
nvbio::device_view( set_flattener.string_ids ),
0u ) );
}
};
template <uint32 SYMBOL_SIZE, uint32 WORD_BITS, uint32 DOLLAR_BITS, typename storage_type, typename index_type>
struct dispatch_set_suffix_radices<
SYMBOL_SIZE, WORD_BITS, DOLLAR_BITS,
ConcatenatedStringSet<PackedStream<storage_type,uint8,SYMBOL_SIZE,true,index_type>,index_type*>,
word_type>
{
typedef ConcatenatedStringSet<
PackedStream<storage_type,uint8,SYMBOL_SIZE,true,index_type>,
index_type*> string_set_type;
template <typename radix_iterator>
void enact(
const string_set_type& string_set,
const SetSuffixFlattener<SYMBOL_SIZE>& set_flattener,
radix_iterator radices)
{
typedef typename std::iterator_traits<radix_iterator>::value_type word_type;
thrust::transform(
thrust::make_counting_iterator(0u),
thrust::make_counting_iterator(0u) + set_flattener.n_suffixes,
radices,
global_set_suffix_word_functor<SYMBOL_SIZE,BITS,DOLLAR_BITS,string_set_type,word_type>(
string_set,
nvbio::device_view( set_flattener.cum_lengths ),
nvbio::device_view( set_flattener.string_ids ),
word_idx ) );
}
};
*/
template <uint32 BITS, uint32 DOLLAR_BITS>
struct Bits {};
/// A helper class allowing to "flatten" the suffixes in a given string-set, i.e. to extract
/// all of them word-by-word in a flattened array.
///
template <uint32 SYMBOL_SIZE>
struct SetSuffixFlattener
{
SetSuffixFlattener(mgpu::ContextPtr _mgpu) :
d_scan_time(0.0f),
d_search_time(0.0f),
m_mgpu( _mgpu ) {}
/// clear internal timers
///
void clear_timers()
{
d_scan_time = 0.0f;
d_search_time = 0.0f;
}
/// reserve storage
///
void reserve(const uint32 n_strings, const uint32 n_suffixes)
{
alloc_storage( cum_lengths, n_strings );
alloc_storage( string_ids, n_suffixes );
}
/// return the amount of device memory needed
///
uint64 needed_device_memory(const uint32 n_strings, const uint32 n_suffixes) const
{
return (n_strings + n_suffixes) * sizeof(uint32);
}
/// initialize this flattener, building the auxiliary data-structures needed
/// to extract the radices
///
template <typename string_set_type>
void set(const string_set_type& string_set, const bool empty_suffixes = true)
{
const uint32 n = string_set.size();
cuda::Timer timer;
timer.start();
// compute the cumulative sum of the string lengths in the set - we will use this for
// building the map: (global suffix index -> string index)
alloc_storage( cum_lengths, n );
cuda::inclusive_scan(
n,
thrust::make_transform_iterator( thrust::make_counting_iterator(0u), length_functor<string_set_type>( string_set, empty_suffixes ) ),
cum_lengths.begin(),
thrust::plus<uint32>(),
temp_storage );
// compute the number of suffixes
n_suffixes = cum_lengths[n-1];
timer.stop();
d_scan_time += timer.seconds();
timer.start();
// assign the string id to each suffix - this is done by a simple binary search on the suffix index
// in the vector of cumulative string lengths
alloc_storage( string_ids, n_suffixes );
// find the end of each bin of values
mgpu::SortedSearch<mgpu::MgpuBoundsLower>(
thrust::make_counting_iterator<uint32>(0u),
n_suffixes,
thrust::make_transform_iterator( cum_lengths.begin(), minus_one() ),
n,
string_ids.begin(),
*m_mgpu );
timer.stop();
d_search_time += timer.seconds();
}
/// extract the given radix of all suffixes
///
template <uint32 BITS, uint32 DOLLAR_BITS, typename string_set_type, typename index_iterator, typename radix_iterator>
void flatten(
const string_set_type& string_set,
const uint32 word_idx,
const Bits<BITS,DOLLAR_BITS> word_bits,
const index_iterator indices,
radix_iterator radices)
{
typedef typename std::iterator_traits<radix_iterator>::value_type word_type;
thrust::transform(
indices,
indices + n_suffixes,
radices,
global_set_suffix_word_functor<SYMBOL_SIZE,BITS,DOLLAR_BITS,string_set_type,word_type>(
string_set,
nvbio::device_view( cum_lengths ),
nvbio::device_view( string_ids ),
word_idx ) );
}
/// compute the maximum suffix length among the specified range
///
template <typename string_set_type>
uint32 max_length(
const string_set_type& string_set, const bool empty_suffixes = true)
{
// compute the maximum string length in the set
return cuda::reduce(
uint32( string_set.size() ),
thrust::make_transform_iterator(
thrust::make_counting_iterator<uint32>(0u),
length_functor<string_set_type>( string_set, empty_suffixes ) ),
thrust::maximum<uint32>(),
temp_storage );
}
/// compute the maximum suffix length among the specified range
///
template <typename string_set_type, typename index_iterator>
uint32 max_length(
const string_set_type& string_set,
const index_iterator indices_begin,
const index_iterator indices_end)
{
// TODO: this function is conservative, in the sense it returns the maximum *string* length;
// however, each suffix might be shorter than the string it belongs to.
return indices_end <= indices_begin ? 0u :
cuda::reduce(
indices_end - indices_begin,
thrust::make_transform_iterator(
thrust::make_permutation_iterator( string_ids.begin(), indices_begin ),
length_functor<string_set_type>( string_set, false ) ),
thrust::maximum<uint32>(),
temp_storage );
}
/// return the amount of used device memory
///
uint64 allocated_device_memory() const
{
return
cum_lengths.size() * sizeof(uint32) +
string_ids.size() * sizeof(uint32) +
temp_storage.size() * sizeof(uint8);
}
uint32 n_suffixes; ///< number of suffixes in the string set
thrust::device_vector<uint32> cum_lengths; ///< cumulative string lengths
thrust::device_vector<uint32> string_ids; ///< a vector containing the string index corresponding
/// to each flattened suffixes; i.e. if the set contains
/// 3 strings of length (3, 2, 5), the string ids will be
/// the vector (0,0,0,1,1,2,2,2,2,2).
thrust::device_vector<uint8> temp_storage;
float d_scan_time;
float d_search_time;
mgpu::ContextPtr m_mgpu;
};
/// A helper class to load a chunk of a string_set from the host onto the device
///
template <uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename storage_type, typename offsets_iterator, typename input_tag, typename output_tag>
struct ChunkLoader {};
/// A helper class to load a chunk of a string_set from the host onto the device
///
template <uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename storage_type, typename offsets_iterator>
struct ChunkLoader<SYMBOL_SIZE,BIG_ENDIAN,storage_type,offsets_iterator,host_tag,device_tag>
{
// infer the word type
typedef typename std::iterator_traits<storage_type>::value_type word_type;
typedef typename std::iterator_traits<offsets_iterator>::value_type index_type;
typedef cuda::load_pointer<word_type,cuda::LOAD_LDG> word_pointer;
typedef PackedStream<word_pointer,uint8,SYMBOL_SIZE,BIG_ENDIAN> packed_stream_type;
typedef typename packed_stream_type::iterator packed_stream_iterator;
typedef ConcatenatedStringSet<packed_stream_iterator,uint32*> chunk_set_type;
// infer the word size
static const uint32 SYMBOLS_PER_WORD = uint32(8u*sizeof(word_type))/SYMBOL_SIZE;
uint64 needed_device_memory(const uint32 max_strings, const uint32 max_symbols) const
{
const uint32 max_words = util::divide_ri( max_symbols, SYMBOLS_PER_WORD ) + 2;
return (max_strings+1) * sizeof(uint32) +
max_words * sizeof(word_type);
}
void reserve(const uint32 max_strings, const uint32 max_symbols)
{
const uint32 max_words = util::divide_ri( max_symbols, SYMBOLS_PER_WORD ) + 2;
alloc_storage( h_chunk_offsets, max_strings+1 );
alloc_storage( d_chunk_offsets, max_strings+1 );
alloc_storage( d_chunk_string, max_words );
}
chunk_set_type load(
const ConcatenatedStringSet<
typename PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type>::iterator,
offsets_iterator> string_set,
const uint32 chunk_begin,
const uint32 chunk_end)
{
const uint32 chunk_size = chunk_end - chunk_begin;
alloc_storage( h_chunk_offsets, chunk_size+1 );
alloc_storage( d_chunk_offsets, chunk_size+1 );
// find the words overlapped by the chunk
const uint64 begin_index = string_set.offsets()[ chunk_begin ];
const uint64 end_index = string_set.offsets()[ chunk_end ];
const uint64 begin_word = (begin_index / SYMBOLS_PER_WORD);
const uint64 end_word = (end_index + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
const uint32 chunk_words = uint32( end_word - begin_word );
const word_type* base_words = string_set.base_string().stream();
alloc_storage( d_chunk_string, chunk_words );
// copy them to the device
thrust::copy(
base_words + begin_word,
base_words + begin_word + chunk_words,
d_chunk_string.begin() );
// build the host offsets
uint32 chunk_symbols = uint32( begin_index % SYMBOLS_PER_WORD );
h_chunk_offsets[0] = chunk_symbols;
for (uint32 i = 0; i < chunk_size; ++i)
{
chunk_symbols += string_set[ chunk_begin + i ].size();
h_chunk_offsets[i+1] = chunk_symbols;
}
// copy the offsets to the device
thrust::copy(
h_chunk_offsets.begin(),
h_chunk_offsets.begin() + chunk_size+1,
d_chunk_offsets.begin() );
// finally assemble the device chunk string-set
packed_stream_type d_packed_stream( word_pointer( nvbio::plain_view( d_chunk_string ) ) );
return chunk_set_type(
chunk_size,
d_packed_stream.begin(),
nvbio::plain_view( d_chunk_offsets ) );
}
thrust::host_vector<uint32> h_chunk_offsets;
thrust::device_vector<word_type> d_chunk_string;
thrust::device_vector<uint32> d_chunk_offsets;
};
/// A helper class to load a chunk of a string_set from the host onto the device
///
template <uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename storage_type, typename offsets_iterator, typename system_tag>
struct ChunkLoader<SYMBOL_SIZE,BIG_ENDIAN,storage_type,offsets_iterator,system_tag,system_tag>
{
typedef typename std::iterator_traits<offsets_iterator>::value_type index_type;
typedef const ConcatenatedStringSet<
typename PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type>::iterator,
offsets_iterator> string_set_type;
typedef string_set_type chunk_set_type;
chunk_set_type load(
const string_set_type string_set,
const uint32 chunk_begin,
const uint32 chunk_end)
{
// assemble the device chunk string-set
return chunk_set_type(
uint32( chunk_end - chunk_begin ),
string_set.base_string(),
string_set.offsets() + chunk_begin );
}
};
/// extract the given radix from the given suffixes of a string
///
template <uint32 SYMBOL_SIZE, uint32 BITS, uint32 DOLLAR_BITS, typename string_type, typename index_iterator, typename radix_iterator>
void flatten_string_suffixes(
const uint64 string_len,
const string_type& string,
const uint32 word_idx,
const index_iterator indices_begin,
const index_iterator indices_end,
radix_iterator radices)
{
typedef typename std::iterator_traits<radix_iterator>::value_type word_type;
thrust::transform(
indices_begin,
indices_end,
radices,
string_suffix_word_functor<SYMBOL_SIZE, BITS, DOLLAR_BITS,string_type,word_type>(
string_len,
string,
word_idx ) );
}
/// A context class to perform suffix bucketing
///
template <uint32 SYMBOL_SIZE, uint32 N_BITS, uint32 DOLLAR_BITS>
struct StringSuffixBucketer
{
typedef uint32 word_type;
StringSuffixBucketer() : d_setup_time(0.0f), d_flatten_time(0.0f), d_count_sort_time(0.0f), d_collect_sort_time(0.0f), d_remap_time(0.0f), d_copy_time(0.0f), d_filter_time(0.0f) {}
/// count the number of suffixes falling in each bucket, where the buckets
/// are defined by the first n_bits of the suffix
///
template <typename suffix_iterator, typename string_type>
void count(
const uint32 n_suffixes,
const suffix_iterator suffixes,
const uint32 string_length,
const string_type& string)
{
cuda::Timer timer;
const uint32 n_buckets = 1u << (N_BITS);
// initialize the temporary and output vectors
alloc_storage( d_indices, n_suffixes * 2u );
alloc_storage( d_radices, n_suffixes * 2u );
alloc_storage( d_buckets, n_buckets );
timer.start();
// extract the first radix word from each of the suffixes
flatten_string_suffixes<SYMBOL_SIZE, N_BITS,DOLLAR_BITS>(
string_length,
string,
0u, // load the first word
suffixes,
suffixes + n_suffixes,
d_radices.begin() );
timer.stop();
d_flatten_time += timer.seconds();
timer.start();
// sort the radices so as to make binning easy
cuda::SortBuffers<word_type*> sort_buffers;
cuda::SortEnactor sort_enactor;
sort_buffers.selector = 0;
sort_buffers.keys[0] = nvbio::device_view( d_radices );
sort_buffers.keys[1] = nvbio::device_view( d_radices ) + n_suffixes;
sort_enactor.sort( n_suffixes, sort_buffers, 0u, N_BITS );
//thrust::sort( d_radices.begin(), d_radices.begin() + n_suffixes );
timer.stop();
d_count_sort_time += timer.seconds();
// initialize the bucket counters
thrust::fill( d_buckets.begin(), d_buckets.end(), 0u );
// compute the number of effectively used buckets looking at the last non-empty one
const uint32 n_used_buckets = d_radices[ sort_buffers.selector * n_suffixes + n_suffixes-1 ] + 1u;
// find the end of each bin of values
thrust::upper_bound(
d_radices.begin() + sort_buffers.selector * n_suffixes,
d_radices.begin() + sort_buffers.selector * n_suffixes + n_suffixes,
thrust::make_counting_iterator<uint32>(0u),
thrust::make_counting_iterator<uint32>(0u) + n_used_buckets,
d_buckets.begin() );
// compute the histogram by taking differences of the cumulative histogram
thrust::adjacent_difference(
d_buckets.begin(), d_buckets.begin() + n_used_buckets,
d_buckets.begin());
}
/// collect the suffixes falling in a given set of buckets, where the buckets
/// are defined by the first n_bits of the suffix
///
template <typename suffix_iterator, typename string_type, typename bucketmap_iterator, typename output_iterator>
uint32 collect(
const uint32 n_suffixes,
const suffix_iterator suffixes,
const uint64 string_length,
const string_type& string,
const uint32 bucket_begin,
const uint32 bucket_end,
const bucketmap_iterator bucketmap,
output_iterator output_radices,
output_iterator output_indices)
{
cuda::Timer timer;
const uint32 n_buckets = 1u << N_BITS;
// initialize the temporary and output vectors
alloc_storage( d_indices, n_suffixes * 2u );
alloc_storage( d_radices, n_suffixes * 2u );
alloc_storage( d_buckets, n_buckets );
timer.start();
// extract the first radix word from each of the suffixes
flatten_string_suffixes<SYMBOL_SIZE,N_BITS,DOLLAR_BITS>(
string_length,
string,
0u, // load the first word
suffixes,
suffixes + n_suffixes,
d_radices.begin() );
timer.stop();
d_flatten_time += timer.seconds();
timer.start();
// determine if a radix is in the given bucket range
const priv::in_range_functor in_range = priv::in_range_functor( bucket_begin, bucket_end );
// retain only suffixes whose radix is between the specified buckets
const uint32 n_collected = cuda::copy_flagged(
n_suffixes,
thrust::make_zip_iterator( thrust::make_tuple( suffixes, d_radices.begin() ) ),
thrust::make_transform_iterator( d_radices.begin(), in_range ),
thrust::make_zip_iterator( thrust::make_tuple( d_indices.begin(), d_radices.begin() ) ) + n_suffixes,
d_temp_storage );
timer.stop();
d_filter_time += timer.seconds();
timer.start();
// remap the collected radices
thrust::gather(
d_radices.begin() + n_suffixes,
d_radices.begin() + n_suffixes + n_collected,
bucketmap,
d_radices.begin() + n_suffixes );
timer.stop();
d_remap_time += timer.seconds();
timer.start();
// sort the radices so as to make binning easy
cuda::SortBuffers<word_type*,uint64*> sort_buffers;
cuda::SortEnactor sort_enactor;
sort_buffers.selector = 0;
//#define SORT_BY_BUCKETS
#if defined(SORT_BY_BUCKETS)
sort_buffers.keys[0] = nvbio::device_view( d_radices ) + n_suffixes;
sort_buffers.keys[1] = nvbio::device_view( d_radices );
sort_buffers.values[0] = (uint64*)nvbio::device_view( d_indices ) + buffer_stride;
sort_buffers.values[1] = (uint64*)nvbio::device_view( d_indices );
sort_enactor.sort( n_collected, sort_buffers, 0u, N_BITS );
#endif
timer.stop();
d_collect_sort_time += timer.seconds();
//
// copy all the indices inside the range to the output
//
//alloc_storage( output_suffixes, n_suffixes );
//alloc_storage( output_radices, n_suffixes );
timer.start();
// the buffer selector had inverted semantics
sort_buffers.selector = 1 - sort_buffers.selector;
// and copy everything to the output
thrust::copy(
d_indices.begin() + sort_buffers.selector * n_suffixes,
d_indices.begin() + sort_buffers.selector * n_suffixes + n_collected,
output_indices );
// and copy everything to the output
thrust::copy(
d_radices.begin() + sort_buffers.selector * n_suffixes,
d_radices.begin() + sort_buffers.selector * n_suffixes + n_collected,
output_radices );
timer.stop();
d_copy_time += timer.seconds();
return n_collected;
}
thrust::device_vector<uint32> d_indices;
thrust::device_vector<word_type> d_radices;
thrust::device_vector<uint32> d_buckets;
thrust::device_vector<uint8> d_temp_storage;
float d_setup_time;
float d_flatten_time;
float d_count_sort_time;
float d_collect_sort_time;
float d_remap_time;
float d_copy_time;
float d_filter_time;
};
//
// A host-side radix extractor context
//
template <typename string_set_type, uint32 SYMBOL_SIZE, uint32 DOLLAR_BITS, uint32 WORD_BITS>
struct HostStringSetRadices
{
HostStringSetRadices(const string_set_type string_set) : m_string_set( string_set ) {}
/// return the number of words needed to represent a given string length
///
uint32 num_words(const uint32 max_string_len) const
{
const uint32 SYMBOLS_PER_WORD = priv::symbols_per_word<SYMBOL_SIZE,WORD_BITS,DOLLAR_BITS>();
return (max_string_len + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
}
/// needed amount of device storage
///
uint64 needed_device_memory(const uint32 n_suffixes) const
{
return n_suffixes * sizeof(uint8) + // d_symbols
n_suffixes * sizeof(uint32); // d_active_suffixes
}
/// reserve any temporary space for the given amount of suffixes
///
void reserve(const uint32 n_suffixes, const uint32 block_size)
{
try
{
d_active_suffixes.resize( n_suffixes );
d_symbols.resize( n_suffixes );
h_symbols.resize( n_suffixes );
h_active_suffixes.resize( n_suffixes );
m_block.resize( n_suffixes * block_size );
}
catch (...)
{
log_error(stderr, "HostStringSetRadices::reserve() : allocation failed!\n");
throw;
}
}
/// initialize the suffixes to extract
///
void init(const uint32 n_suffixes, const uint2* _h_suffixes, const uint2* _d_suffixes)
{
m_suffixes = _h_suffixes;
d_suffixes = thrust::device_ptr<const uint2>( _d_suffixes );
}
/// initialize extraction of a slice of words
///
void init_slice(
const uint32 n_indices,
const uint32* d_indices,
const uint32 word_block_begin,
const uint32 word_block_end)
{
try
{
if (d_indices == NULL)
{
// extract the given radix word from each of the partially sorted suffixes on the host
priv::extract_radices(
m_string_set,
n_indices,
word_block_begin,
word_block_end,
WORD_BITS,
m_suffixes,
&m_block[0],
word_block_begin == 0 ? &h_symbols[0] : NULL ); // on the first iteration, load the BWT symbols too
}
else
{
// gather the list of active suffixes
thrust::gather(
thrust::device_ptr<const uint32>( d_indices ),
thrust::device_ptr<const uint32>( d_indices ) + n_indices,
d_suffixes,
d_active_suffixes.begin() );
// copy the list of active suffixes to the host
thrust::copy(
d_active_suffixes.begin(),
d_active_suffixes.begin() + n_indices,
h_active_suffixes.begin() );
// extract the given radix word from each of the partially sorted suffixes on the host
priv::extract_radices(
m_string_set,
n_indices,
word_block_begin,
word_block_end,
WORD_BITS,
&h_active_suffixes[0],
&m_block[0] );
}
}
catch (...)
{
log_error(stderr, "HostStringSetRadices::init_slice() : exception caught!\n");
throw;
}
}
/// extract the radices corresponding to a given word of the given suffixes
///
/// \param n_indices the input number of suffixes
/// \param d_indices a device vector of the indices to extract
/// \param word_idx the word index to extract
/// \param word_begin the beginning of the current slice range
/// \param word_idx the end of the current slice range
/// \param d_radices the destination device array to hold the output
///
void extract(
const uint32 n_indices,
const uint32* d_indices,
const uint32 word_idx,
const uint32 word_block_begin,
const uint32 word_block_end,
uint32* d_radices) const
{
try
{
// and copy them to the device
thrust::copy(
m_block.begin() + n_indices * (word_idx - word_block_begin),
m_block.begin() + n_indices * (word_idx - word_block_begin) + n_indices,
thrust::device_ptr<uint32>( d_radices ) );
}
catch (...)
{
log_error(stderr, "HostStringSetRadices::extract() : exception caught!\n");
throw;
}
}
/// extract the bwt of the given block
///
void dollar_bwt(
const uint32 begin,
const uint32 end,
uint8* h_bwt)
{
const int n_strings = int( end - begin );
// fetch the BWT symbols for the given strings
#pragma omp parallel for
for (int i = 0; i < n_strings; ++i)
{
const priv::string_set_bwt_functor<string_set_type> bwt( m_string_set );
h_bwt[i] = bwt( i + begin );
}
}
/// extract the bwt of the given block
///
void bwt(
const uint32 n_suffixes,
const uint32* d_indices,
uint8* h_bwt,
uint8* d_bwt)
{
try
{
if (d_indices != NULL)
{
#if 0
// fetch the BWT symbols for this block of suffixes
#pragma omp parallel for
for (int i = 0; i < n_suffixes; ++i)
{
const priv::string_set_bwt_functor<string_set_type> bwt( m_string_set );
h_symbols[i] = bwt( m_suffixes[i] );
}
#endif
#if 0
alloc_storage( m_block, n_suffixes );
// re-purpose the radix-block storage
uint32* h_indices = &m_block[0];
// copy the sorted indices to the host
thrust::copy(
thrust::device_ptr<const uint32>( d_indices ),
thrust::device_ptr<const uint32>( d_indices ) + n_suffixes,
h_indices );
// and compute the bwt of the block by gathering the symbols in suffix-sorted order
thrust::gather(
h_indices,
h_indices + n_suffixes,
h_symbols.begin(),
h_bwt );
#else
alloc_storage( d_symbols, n_suffixes );
// copy the symbols to the device
thrust::copy(
h_symbols.begin(),
h_symbols.begin() + n_suffixes,
d_symbols.begin() );
// gather the symbols in proper order
thrust::gather(
thrust::device_ptr<const uint32>( d_indices ),
thrust::device_ptr<const uint32>( d_indices ) + n_suffixes,
d_symbols.begin(),
thrust::device_ptr<uint8>( d_bwt ) );
// and copy the sorted symbols back to the host
thrust::copy(
thrust::device_ptr<uint8>( d_bwt ),
thrust::device_ptr<uint8>( d_bwt ) + n_suffixes,
h_bwt );
#endif
}
else
{
// fetch the BWT symbols for this block of suffixes
#pragma omp parallel for
for (int i = 0; i < n_suffixes; ++i)
{
const priv::string_set_bwt_functor<string_set_type> bwt( m_string_set );
h_bwt[i] = bwt( m_suffixes[i] );
}
// and copy the sorted symbols back to the device
thrust::copy(
h_bwt,
h_bwt + n_suffixes,
thrust::device_ptr<uint8>( d_bwt ) );
}
}
catch (...)
{
log_error(stderr, "HostStringSetRadices::bwt() : exception caught!\n");
throw;
}
}
/// return the amount of used device memory
///
uint64 allocated_device_memory() const
{
return
d_active_suffixes.size() * sizeof(uint2) +
d_symbols.size() * sizeof(uint8);
}
/// return the amount of used device memory
///
uint64 allocated_host_memory() const
{
return
m_block.size() * sizeof(uint2) +
h_active_suffixes.size() * sizeof(uint2) +
h_symbols.size() * sizeof(uint8);
}
string_set_type m_string_set;
const uint2* m_suffixes;
thrust::device_ptr<const uint2> d_suffixes;
thrust::device_vector<uint2> d_active_suffixes;
thrust::device_vector<uint8> d_symbols;
thrust::host_vector<uint2> h_active_suffixes;
thrust::host_vector<uint8> h_symbols;
thrust::host_vector<uint32> m_block;
};
//
// A host-side radix extractor context
//
template <typename string_set_type, uint32 SYMBOL_SIZE, uint32 DOLLAR_BITS, uint32 WORD_BITS>
struct DeviceStringSetRadices
{
DeviceStringSetRadices() {}
DeviceStringSetRadices(const string_set_type string_set) : m_string_set( string_set ) {}
/// return the number of words needed to represent a given string length
///
uint32 num_words(const uint32 max_string_len) const
{
const uint32 SYMBOLS_PER_WORD = priv::symbols_per_word<SYMBOL_SIZE,WORD_BITS,DOLLAR_BITS>();
return (max_string_len + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
}
/// needed amount of device storage
///
uint64 needed_device_memory(const uint32 n_suffixes) const
{
return n_suffixes; // d_symbols
}
/// reserve any temporary space for the given amount of suffixes
///
void reserve(const uint32 n_suffixes, const uint32 slice_size)
{
d_symbols.resize( n_suffixes );
}
/// set string set
///
void set(const string_set_type string_set) { m_string_set = string_set; }
/// initialize the suffixes to extract
///
void init(const uint32 n_suffixes, const uint2* _h_suffixes, const uint2* _d_suffixes)
{
d_suffixes = thrust::device_ptr<const uint2>( _d_suffixes );
}
/// initialize extraction of a slice of words
///
void init_slice(
const uint32 n_indices,
const uint32* d_indices,
const uint32 word_block_begin,
const uint32 word_block_end) {}
/// extract the radices corresponding to a given word of the given suffixes
///
/// \param n_indices the input number of suffixes
/// \param d_indices a device vector of the indices to extract
/// \param word_idx the word index to extract
/// \param word_begin the beginning of the current slice range
/// \param word_idx the end of the current slice range
/// \param d_radices the destination device array to hold the output
///
void extract(
const uint32 n_indices,
const uint32* d_indices,
const uint32 word_idx,
const uint32 word_block_begin,
const uint32 word_block_end,
uint32* d_radices) const
{
// extract the given radix word from each of the partially sorted suffixes in a device temp buffer
priv::local_set_suffix_word_functor<SYMBOL_SIZE,WORD_BITS,DOLLAR_BITS,string_set_type,uint32> word_functor( m_string_set, word_idx );
if (d_indices == NULL)
{
thrust::copy(
thrust::make_transform_iterator( d_suffixes, word_functor ),
thrust::make_transform_iterator( d_suffixes, word_functor ) + n_indices,
thrust::device_ptr<uint32>( d_radices ) );
}
else
{
thrust::copy(
thrust::make_transform_iterator( thrust::make_permutation_iterator( d_suffixes, thrust::device_ptr<const uint32>( d_indices ) ), word_functor ),
thrust::make_transform_iterator( thrust::make_permutation_iterator( d_suffixes, thrust::device_ptr<const uint32>( d_indices ) ), word_functor ) + n_indices,
thrust::device_ptr<uint32>( d_radices ) );
}
}
/// extract the bwt of the given block
///
void dollar_bwt(
const uint32 begin,
const uint32 end,
uint8* h_bwt)
{
const int n_strings = end - begin;
alloc_storage( d_symbols, n_strings );
// fetch the BWT symbols for the given strings
thrust::transform(
thrust::make_counting_iterator<uint32>(begin),
thrust::make_counting_iterator<uint32>(end),
d_symbols.begin(),
priv::string_set_bwt_functor<string_set_type>( m_string_set ) );
// and copy the result to the host
thrust::copy(
d_symbols.begin(),
d_symbols.begin() + n_strings,
h_bwt );
}
/// extract the bwt of the given block
///
void bwt(
const uint32 n_suffixes,
const uint32* d_indices,
uint8* h_bwt,
uint8* d_bwt)
{
if (d_indices != NULL)
{
alloc_storage( d_symbols, n_suffixes );
// fetch the BWT symbols for this block of suffixes
thrust::transform(
d_suffixes,
d_suffixes + n_suffixes,
d_symbols.begin(),
priv::string_set_bwt_functor<string_set_type>( m_string_set ) );
// and compute the bwt of the block by gathering the symbols in suffix-sorted order
thrust::gather(
thrust::device_ptr<const uint32>( d_indices ),
thrust::device_ptr<const uint32>( d_indices ) + n_suffixes,
d_symbols.begin(),
thrust::device_ptr<uint8>( d_bwt ) );
}
else
{
// fetch the BWT symbols for this block of suffixes
thrust::transform(
d_suffixes,
d_suffixes + n_suffixes,
thrust::device_ptr<uint8>( d_bwt ),
priv::string_set_bwt_functor<string_set_type>( m_string_set ) );
}
// and copy the result to the host
thrust::copy(
thrust::device_ptr<uint8>( d_bwt ),
thrust::device_ptr<uint8>( d_bwt ) + n_suffixes,
h_bwt );
}
/// return the amount of used device memory
///
uint64 allocated_device_memory() const
{
return d_symbols.size() * sizeof(uint8);
}
/// return the amount of used device memory
///
uint64 allocated_host_memory() const
{
return 0u;
}
string_set_type m_string_set;
thrust::device_ptr<const uint2> d_suffixes;
thrust::device_vector<uint8> d_symbols;
};
/// Collect dollar symbols out of a BWT + SA block
///
struct DollarExtractor
{
/// constructor
///
DollarExtractor() :
offset(0),
n_dollars(0) {}
/// process a batch of BWT symbols
///
uint32 extract(
const uint32 n_suffixes,
const uint8* h_bwt,
const uint8* d_bwt,
const uint2* h_suffixes,
const uint2* d_suffixes,
const uint32* d_indices);
uint64 offset;
uint32 n_dollars;
thrust::device_vector<uint64> d_dollar_ranks;
thrust::device_vector<uint32> d_dollar_indices;
thrust::device_vector<uint64> d_dollars;
thrust::host_vector<uint64> h_dollar_ranks;
thrust::host_vector<uint64> h_dollars;
thrust::device_vector<uint8> d_temp_storage;
};
// ------------------------------------------------------------------------------------------------------------- //
// the following functions implement device_copy() and device_scatter() - special-purpose functions to copy
// and scatter a set of symbols to a packed stream.
// ------------------------------------------------------------------------------------------------------------- //
/// a simple auxiliary kernel to perform generic device-to-device copies, specialized for packed streams
///
template <typename input_iterator, typename output_iterator, typename index_type>
__global__ void simple_device_copy_kernel(
const uint32 n,
const input_iterator input,
output_iterator output,
const index_type offset)
{
const uint32 thread_id = threadIdx.x + blockIdx.x*blockDim.x;
if (thread_id < n)
output[offset + thread_id] = input[thread_id];
}
/// a simple auxiliary kernel to perform generic device-to-device copies, specialized for packed streams
///
template <typename input_iterator, typename storage_type, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename index_type>
__global__ void packed_device_copy_kernel(
const uint32 n,
const input_iterator input,
PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type> output,
const index_type offset)
{
const uint32 thread_id = threadIdx.x + blockIdx.x*blockDim.x;
//
// care must be used to avoid write-conflicts, hence we assign all symbols belonging
// to the same output word to a single thread
//
typedef typename PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type>::storage_type word_type;
const uint32 SYMBOLS_PER_WORD = (8u*sizeof(word_type))/SYMBOL_SIZE;
const uint32 word_offset = uint32( (offset + output.index()) & (SYMBOLS_PER_WORD-1) );
const uint32 elem_begin = thread_id ? (thread_id+0) * SYMBOLS_PER_WORD - word_offset : 0u;
const uint32 elem_end = nvbio::min( (thread_id+1) * SYMBOLS_PER_WORD - word_offset, n );
if (elem_begin < n)
{
for (uint32 i = elem_begin; i < elem_end; ++i)
output[offset+i] = input[i];
}
}
/// a dispatcher for device_copy
///
template <typename input_iterator, typename output_iterator, typename index_type>
struct device_copy_dispatch
{
/// copy n elements from the input stream to the output
///
static void copy(
const uint32 n,
const input_iterator input,
const output_iterator output,
const index_type offset)
{
const uint32 batch_size = cuda::max_grid_size();
for (uint32 batch_begin = 0; batch_begin < n; batch_begin += batch_size)
{
const uint32 batch_end = nvbio::min( batch_begin + batch_size, n );
const uint32 blockdim = 128;
const uint32 n_blocks = util::divide_ri( batch_end - batch_begin, blockdim );
simple_device_copy_kernel<<<n_blocks,blockdim>>>( n, input, output, offset );
}
}
};
/// a dispatcher for device_copy
///
template <typename input_iterator, typename storage_type, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename index_type>
struct device_copy_dispatch<
input_iterator,
PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type>,
index_type>
{
typedef PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type> output_iterator;
/// copy n elements from the input stream to the output
///
static void copy(
const uint32 n,
const input_iterator input,
const output_iterator output,
const index_type offset)
{
typedef typename PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type>::storage_type word_type;
const uint32 SYMBOLS_PER_WORD = (8u*sizeof(word_type))/SYMBOL_SIZE;
const uint32 batch_size = cuda::max_grid_size();
for (uint32 batch_begin = 0; batch_begin < n; batch_begin += batch_size)
{
const uint32 batch_end = nvbio::min( batch_begin + batch_size, n );
const uint32 blockdim = 128;
const uint32 n_words = util::divide_ri( batch_end - batch_begin, SYMBOLS_PER_WORD ) + 1u;
const uint32 n_blocks = util::divide_ri( n_words, blockdim );
packed_device_copy_kernel<<<n_blocks,blockdim>>>( batch_end - batch_begin, input, output, offset + batch_begin );
}
}
};
/// copy a set of n symbols from a given input stream to a given output stream
///
template <typename input_iterator, typename output_iterator, typename index_type>
void device_copy(
const uint32 n,
const input_iterator input,
const output_iterator output,
const index_type offset)
{
device_copy_dispatch<input_iterator,output_iterator,index_type>::copy( n, input, output, offset );
}
/// an auxiliary kernel to scatter a set of symbols into a sparse set of slots of a given output stream;
/// this kernel copies a full range of symbols per thread, where individual ranges are guaranteed to
/// touch distinct words of the underlying storage where the output is packed.
///
template <typename input_iterator, typename slot_iterator, typename range_iterator, typename storage_type, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename index_type>
__global__ void device_scatter_kernel(
const uint32 begin,
const uint32 end,
const range_iterator ranges,
const input_iterator input,
const slot_iterator slots,
PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type> output)
{
const uint32 thread_id = threadIdx.x + blockIdx.x*blockDim.x;
const uint32 idx = thread_id + begin;
if (idx >= end)
return;
//
// care must be used to avoid write-conflicts, hence we assign all symbols belonging
// to the same output word to a single thread
//
const uint32 elem_begin = idx ? ranges[ idx-1 ] : 0u;
const uint32 elem_end = ranges[ idx ];
for (uint32 i = elem_begin; i < elem_end; ++i)
{
const uint32 slot = slots[i];
output[ slot ] = input[i];
}
}
/// scatter a set of symbols into a sparse set of slots of a given output stream
///
template <typename input_iterator, typename slot_iterator, typename output_iterator>
struct device_scatter_dispatch
{
static void enact(
const uint32 n,
const input_iterator input,
const slot_iterator slots,
output_iterator output)
{
thrust::scatter(
input,
input + n,
slots,
output );
}
};
/// scatter a set of symbols into a sparse set of slots of a given output stream
///
template <typename input_iterator, typename slot_iterator, typename storage_type, uint32 SYMBOL_SIZE, bool BIG_ENDIAN, typename index_type>
struct device_scatter_dispatch<
input_iterator,
slot_iterator,
PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type> >
{
typedef PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type> output_iterator;
static void enact(
const uint32 n,
const input_iterator input,
const slot_iterator slots,
output_iterator output)
{
// find out a set of ranges of input symbols covering distinct words in the output: this is done
// looking at the words covered by each of the symbols, and reducing together all symbols falling
// in the same word.
thrust::device_vector<uint32> d_ranges( n );
thrust::device_vector<uint32> d_keys( n );
typedef typename PackedStream<storage_type,uint8,SYMBOL_SIZE,BIG_ENDIAN,index_type>::storage_type word_type;
const uint32 SYMBOLS_PER_WORD = (8u*sizeof(word_type))/SYMBOL_SIZE;
const uint32 n_ranges = uint32( thrust::reduce_by_key(
thrust::make_transform_iterator( slots, add_divide_functor( output.index(), SYMBOLS_PER_WORD ) ),
thrust::make_transform_iterator( slots + n, add_divide_functor( output.index(), SYMBOLS_PER_WORD ) ),
thrust::make_counting_iterator<uint32>(1u),
d_keys.begin(),
d_ranges.begin(),
thrust::equal_to<uint32>(),
thrust::maximum<uint32>() ).first - d_keys.begin() );
const uint32 batch_size = cuda::max_grid_size();
for (uint32 batch_begin = 0; batch_begin < n_ranges; batch_begin += batch_size)
{
const uint32 batch_end = nvbio::min( batch_begin + batch_size, n_ranges );
// at this point we can scatter the identified ranges
const uint32 blockdim = 128;
const uint32 n_blocks = util::divide_ri( batch_end - batch_begin, blockdim );
device_scatter_kernel<<<n_blocks,blockdim>>>(
batch_begin,
batch_end,
d_ranges.begin(),
input,
slots,
output );
}
}
};
/// scatter a set of symbols into a sparse set of slots of a given output stream
///
template <typename input_iterator, typename slot_iterator, typename output_iterator>
void device_scatter(
const uint32 n,
const input_iterator input,
const slot_iterator slots,
output_iterator output)
{
device_scatter_dispatch<input_iterator,slot_iterator,output_iterator>::enact(
n,
input,
slots,
output );
}
// ------------------------------------------------------------------------------------------------------------- //
/// pack a set of head flags into a bit-packed array
///
void pack_flags(
const uint32 n,
const uint8* flags,
uint32* comp_flags);
/// build a set of head flags looking at adjacent keys
///
void build_head_flags(
const uint32 n,
const uint32* keys,
uint8* flags);
/// build a set of head flags looking at adjacent keys
///
void build_head_flags(
const uint32 n,
const uint64* keys,
uint8* flags);
} // namespace priv
} // namespace nvbio
|
parallel-walsh.c | #include <stdlib.h>
#include <stdint.h>
void fast_parallel_walsh(int* vec, int vecSize)
{
// if the vector is of size 1 or less (0), we dont need to do anything and we can just return it as is
if(vecSize<=1) return;
// start to divide the original vecSize into 2 on every step, until we can't divide any longer
register int partition = vecSize >> 1;
//how many sections do we have to go over in this iteration
register int sections = 1;
while(partition>0){
// go over all sections - this section can be parallel!
#pragma omp parallel for schedule(static) collapse(2)
for(register int i=0; i<sections; i++) {
// go over all the current vector to calculate the new values
for (register int j = 0; j < partition; j++) {
// in here we want to have two indexes to vec: 0 and 0+partitionSize/2
register int firstPart = (i<<1)*partition+j;
register int secondPart = firstPart+partition;
register int tmp = vec[firstPart];
vec[firstPart] = tmp + vec[secondPart];
vec[secondPart] = tmp - vec[secondPart];
}
}
partition >>= 1;
sections <<= 1;
}
}
int numberOfSetBits(uint32_t i)
{
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
int* createCol(int colNum, int vecSize){
int* col = (int*)malloc(sizeof(int)*vecSize);
if(!col) exit(1);
for(int i=0; i<vecSize; i++){
col[i] = (numberOfSetBits(i&colNum) % 2 == 0)*2 - 1;
}
return col;
}
void simple_parallel_walsh(int* vec, int vecSize)
{
// if the vector is of size 1 or less (0), we dont need to do anything and we can just return it as is
if(vecSize<=1) return;
//allocate a tmp vector to save the results until we're done.
int* tmpRes = (int*)malloc(sizeof(int)*vecSize);
if(!tmpRes) exit(1);
// let's calc the value in each cell in the vector
//in this section, we can create multiple threads to compute this part
#pragma omp for schedule(static)
for(int i=0; i<vecSize; i++){
tmpRes[i]=0;
//first, create the right col from the hadamard matrix
int* col = createCol(i,vecSize);
//go over the created col and the original vec to multiple
for(int j=0; j<vecSize; j++){
tmpRes[i] += col[j]*vec[j];
}
//free the col before next iteration allocation
free(col);
}
//move the results from the tmpVec to vec, and fre tmpVec
//all threads must finish calculating before moving to copy the results, and free-ing the tmpRes vector
#pragma omp barrier
#pragma omp for schedule(static)
for(int i=0; i<vecSize; i++){
vec[i]=tmpRes[i];
}
free(tmpRes);
}
|
sdiv.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include "sptensor.h"
int ptiSparseTensorDivScalar(ptiSparseTensor *X, ptiValue const a) {
if(a != 0) {
ptiNnzIndex i;
#pragma omp parallel for schedule(static)
for(i = 0; i < X->nnz; ++i) {
X->values.data[i] /= a;
}
return 0;
} else {
pti_CheckError(PTIERR_ZERO_DIVISION, "SpTns Div", "divide by zero");
}
return 0;
}
|
nodal_residualbased_elimination_builder_and_solver_continuity.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY)
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolverContinuity
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolverContinuity
: public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverContinuity);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
NodalResidualBasedEliminationBuilderAndSolverContinuity(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
{
// KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverContinuity") << "Using the standard builder and solver " << std::endl;
}
/** Destructor.
*/
~NodalResidualBasedEliminationBuilderAndSolverContinuity() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void BuildNodally(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the continuity equation system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
double pressure = 0;
double deltaPressure = 0;
double meanMeshSize = 0;
double characteristicLength = 0;
double density = 0;
double nodalVelocityNorm = 0;
double tauStab = 0;
double dNdXi = 0;
double dNdYi = 0;
double dNdZi = 0;
double dNdXj = 0;
double dNdYj = 0;
double dNdZj = 0;
unsigned int firstRow = 0;
unsigned int firstCol = 0;
/* #pragma omp parallel */
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size() + 1;
if (neighSize > 1)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
LHS_Contribution = ZeroMatrix(neighSize, neighSize);
RHS_Contribution = ZeroVector(neighSize);
if (EquationId.size() != neighSize)
EquationId.resize(neighSize, false);
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
if (deviatoricCoeff > 0.1)
{
deviatoricCoeff = 0.1;
}
double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId();
}
firstRow = 0;
firstCol = 0;
meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE);
characteristicLength = 1.0 * meanMeshSize;
density = itNode->FastGetSolutionStepValue(DENSITY);
/* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */
if (dimension == 2)
{
nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y));
}
else if (dimension == 3)
{
nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) +
itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z));
}
tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) / (density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval);
itNode->FastGetSolutionStepValue(NODAL_TAU) = tauStab;
/* std::cout<<"tauStab= "<<tauStab<<std::endl; */
LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval);
RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval);
if (itNode->Is(FREE_SURFACE))
{
// // double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA);
// /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */
// /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */
LHS_Contribution(0, 0) += +4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize);
RHS_Contribution[0] += -4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0);
const array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL);
Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1);
/* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */
double nodalNormalAcceleration = 0;
double nodalNormalProjDefRate = 0;
if (dimension == 2)
{
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1];
/* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */
// nodalNormalAcceleration=(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_X,1))*Normal[0] +
// (0.5*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1))*Normal[1];
nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1];
}
else if (dimension == 3)
{
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] +
2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2];
/* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */
/* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */
}
// RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea;
double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize;
double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize);
RHS_Contribution[0] += 1.0 * tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume;
}
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
// double posX= itNode->X();
// double posY= itNode->Y();
// double coeffX =(12.0-24.0*posY)*pow(posX,4);
// coeffX += (-24.0+48.0*posY)*pow(posX,3);
// coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
if (i != 0)
{
// i==0 of EquationIs has been already filled with the master node (that is not included in neighb_nodes). The next is stored for i+1
EquationId[i] = neighb_nodes[i - 1].GetDof(PRESSURE, xDofPos).EquationId();
// at i==0 density and volume acceleration are taken from the master node
density = neighb_nodes[i - 1].FastGetSolutionStepValue(DENSITY);
// VolumeAcceleration = neighb_nodes[i-1].FastGetSolutionStepValue(VOLUME_ACCELERATION);
// // posX= neighb_nodes[i-1].X();
// // posY= neighb_nodes[i-1].Y();
// // coeffX =(12.0-24.0*posY)*pow(posX,4);
// // coeffX += (-24.0+48.0*posY)*pow(posX,3);
// // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// // coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
}
if (dimension == 2)
{
// RHS_Contribution[i] += - tauStab * density * (dNdXi* VolumeAcceleration[0]*coeffX + dNdYi* VolumeAcceleration[1]*coeffY) * nodalVolume;
RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1]) * nodalVolume;
}
else if (dimension == 3)
{
dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2];
RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1] + dNdZi * VolumeAcceleration[2]) * nodalVolume;
}
firstRow = 0;
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
// double Vx=itNode->FastGetSolutionStepValue(VELOCITY_X,0);
// double Vy=itNode->FastGetSolutionStepValue(VELOCITY_Y,0);
if (j != 0)
{
pressure = neighb_nodes[j - 1].FastGetSolutionStepValue(PRESSURE, 0);
// Vx= neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X,0);
// Vy= neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y,0);
// meanMeshSize=neighb_nodes[j-1].FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE);
// characteristicLength=2.0*meanMeshSize;
// density=neighb_nodes[j-1].FastGetSolutionStepValue(DENSITY);
// if(dimension==2){
// nodalVelocityNorm= sqrt(neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X) +
// neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y));
// }else if(dimension==3){
// nodalVelocityNorm=sqrt(neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X) +
// neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y) +
// neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Z)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Z));
// }
}
else
{
pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0);
// meanMeshSize=itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE);
// characteristicLength=2.0*meanMeshSize;
// density=itNode->FastGetSolutionStepValue(DENSITY);
// if(dimension==2){
// nodalVelocityNorm= sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) +
// itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y));
// }else if(dimension==3){
// nodalVelocityNorm=sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) +
// itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y) +
// itNode->FastGetSolutionStepValue(VELOCITY_Z)*itNode->FastGetSolutionStepValue(VELOCITY_Z));
// }
}
// tauStab= 1.0 * (characteristicLength * characteristicLength * timeInterval) / ( density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval );
if (dimension == 2)
{
// // ////////////////// Laplacian term for LHS
LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume;
// // ////////////////// Laplacian term L_ij*P_j for RHS
RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume * pressure;
// RHS_Contribution[i] += (dNdXj*Vx + dNdYj*Vy)*nodalVolume/3.0;
// LHS_Contribution(i,j)+= nodalVolume/volumetricCoeff/(1.0+double(neighSize));
// if(i==j){
// RHS_Contribution[i] += (-deltaPressure/volumetricCoeff )*nodalVolume;
// }
}
else if (dimension == 3)
{
dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2];
////////////////// Laplacian term for LHS
LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume;
////////////////// Laplacian term L_ij*P_j for RHS
RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume * pressure;
}
firstRow += dimension;
}
firstCol += dimension;
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
}
KRATOS_CATCH("")
}
void BuildNodallyUnlessLaplacian(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the continuity equation system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
double deltaPressure = 0;
double meanMeshSize = 0;
double characteristicLength = 0;
double density = 0;
double nodalVelocityNorm = 0;
double tauStab = 0;
double dNdXi = 0;
double dNdYi = 0;
double dNdZi = 0;
unsigned int firstCol = 0;
/* #pragma omp parallel */
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size() + 1;
if (neighSize > 1)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
LHS_Contribution = ZeroMatrix(neighSize, neighSize);
RHS_Contribution = ZeroVector(neighSize);
if (EquationId.size() != neighSize)
EquationId.resize(neighSize, false);
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
if (deviatoricCoeff > 0.1)
{
deviatoricCoeff = 0.1;
}
double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId();
}
firstCol = 0;
meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE);
characteristicLength = 1.0 * meanMeshSize;
density = itNode->FastGetSolutionStepValue(DENSITY);
/* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */
if (dimension == 2)
{
nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y));
}
else if (dimension == 3)
{
nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) +
itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z));
}
tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) /
(density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval);
itNode->FastGetSolutionStepValue(NODAL_TAU) = tauStab;
LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval);
RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval);
if (itNode->Is(FREE_SURFACE))
{
// // double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA);
// /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */
// /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */
LHS_Contribution(0, 0) += +4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize);
RHS_Contribution[0] += -4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0);
array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL);
Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1);
/* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */
double nodalNormalAcceleration = 0;
double nodalNormalProjDefRate = 0;
if (dimension == 2)
{
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1];
/* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */
// nodalNormalAcceleration=(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_X,1))*Normal[0] +
// (0.5*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1))*Normal[1];
nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1];
}
else if (dimension == 3)
{
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] +
2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2];
/* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */
/* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */
}
// RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea;
double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize;
double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize);
RHS_Contribution[0] += 1.0 * tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume;
}
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
// double posX= itNode->X();
// double posY= itNode->Y();
// double coeffX =(12.0-24.0*posY)*pow(posX,4);
// coeffX += (-24.0+48.0*posY)*pow(posX,3);
// coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
if (i != 0)
{
// i==0 of EquationIs has been already filled with the master node (that is not included in neighb_nodes). The next is stored for i+1
EquationId[i] = neighb_nodes[i - 1].GetDof(PRESSURE, xDofPos).EquationId();
// at i==0 density and volume acceleration are taken from the master node
density = neighb_nodes[i - 1].FastGetSolutionStepValue(DENSITY);
// // VolumeAcceleration = neighb_nodes[i-1].FastGetSolutionStepValue(VOLUME_ACCELERATION);
// // posX= neighb_nodes[i-1].X();
// // posY= neighb_nodes[i-1].Y();
// // coeffX =(12.0-24.0*posY)*pow(posX,4);
// // coeffX += (-24.0+48.0*posY)*pow(posX,3);
// // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// // coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
}
if (dimension == 2)
{
// RHS_Contribution[i] += - tauStab * density * (dNdXi* VolumeAcceleration[0]*coeffX + dNdYi* VolumeAcceleration[1]*coeffY) * nodalVolume;
RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1]) * nodalVolume;
}
else if (dimension == 3)
{
dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2];
RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1] + dNdZi * VolumeAcceleration[2]) * nodalVolume;
}
firstCol += dimension;
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
}
KRATOS_CATCH("")
}
void BuildNodallyNoVolumetricStabilizedTerms(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the continuity equation system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
double deltaPressure = 0;
double meanMeshSize = 0;
double characteristicLength = 0;
double density = 0;
double nodalVelocityNorm = 0;
double tauStab = 0;
/* #pragma omp parallel */
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size() + 1;
if (neighSize > 1)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
LHS_Contribution = ZeroMatrix(neighSize, neighSize);
RHS_Contribution = ZeroVector(neighSize);
if (EquationId.size() != neighSize)
EquationId.resize(neighSize, false);
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
if (deviatoricCoeff > 0.1)
{
deviatoricCoeff = 0.1;
}
double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId();
}
meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE);
characteristicLength = 1.0 * meanMeshSize;
density = itNode->FastGetSolutionStepValue(DENSITY);
/* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */
if (dimension == 2)
{
nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y));
}
else if (dimension == 3)
{
nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) +
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) +
itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z));
}
tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) /
(density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval);
itNode->FastGetSolutionStepValue(NODAL_TAU) = tauStab;
LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval);
RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval);
if (itNode->Is(FREE_SURFACE))
{
// // double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA);
// /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */
// /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */
LHS_Contribution(0, 0) += +4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize);
RHS_Contribution[0] += -4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0);
array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL);
Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1);
/* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */
double nodalNormalAcceleration = 0;
double nodalNormalProjDefRate = 0;
if (dimension == 2)
{
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1];
/* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */
// nodalNormalAcceleration=(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_X,1))*Normal[0] +
// (0.5*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1))*Normal[1];
nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1];
}
else if (dimension == 3)
{
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] +
2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2];
/* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */
/* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */
}
// RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea;
double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize;
double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize);
RHS_Contribution[0] += 1.0 * tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume;
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
}
KRATOS_CATCH("")
}
void BuildNodallyNotStabilized(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the continuity equation system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
double deltaPressure = 0;
/* #pragma omp parallel */
{
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size() + 1;
if (neighSize > 1)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
LHS_Contribution = ZeroMatrix(neighSize, neighSize);
RHS_Contribution = ZeroVector(neighSize);
if (EquationId.size() != neighSize)
EquationId.resize(neighSize, false);
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
if (deviatoricCoeff > 0.1)
{
deviatoricCoeff = 0.1;
}
double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId();
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
}
KRATOS_CATCH("")
}
void BuildAll(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the continuity equation system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
double deltaPressure = 0;
/* #pragma omp parallel */
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size() + 1;
if (neighSize > 1)
{
// if (LHS_Contribution.size1() != neighSize)
// LHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!!
// if (RHS_Contribution.size() != neighSize)
// RHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!!
// LHS_Contribution= ZeroMatrix(neighSize,neighSize);
// RHS_Contribution= ZeroVector(neighSize);
// if (EquationId.size() != neighSize)
// EquationId.resize(neighSize, false);
if (LHS_Contribution.size1() != 1)
LHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!!
if (RHS_Contribution.size() != 1)
RHS_Contribution.resize(1, false); //false says not to preserve existing storage!!
LHS_Contribution = ZeroMatrix(1, 1);
RHS_Contribution = ZeroVector(1);
if (EquationId.size() != 1)
EquationId.resize(1, false);
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
if (nodalVolume > 0)
{ // in interface nodes not in contact with fluid elements the nodal volume is zero
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
if (deviatoricCoeff > 0.1)
{
deviatoricCoeff = 0.1;
}
double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
}
const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
//}
}
// }
ElementsArrayType &pElements = rModelPart.Elements();
int number_of_threads = OpenMPUtils::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
std::vector<omp_lock_t> lock_array(A.size1());
for (int i = 0; i < A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel() > 0)
{
KRATOS_WATCH(number_of_threads);
KRATOS_WATCH(element_partition);
}
#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType elementalLHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType elementalRHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType elementalEquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
unsigned int pos = (rModelPart.Nodes().begin())->GetDofPosition(PRESSURE);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
//(*it)->InitializeNonLinearIteration(CurrentProcessInfo);
(*it)->CalculateLocalSystem(elementalLHS_Contribution, elementalRHS_Contribution, CurrentProcessInfo);
Geometry<Node<3>> &geom = (*it)->GetGeometry();
if (elementalEquationId.size() != geom.size())
elementalEquationId.resize(geom.size(), false);
for (unsigned int i = 0; i < geom.size(); i++)
elementalEquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();
//assemble the elemental contribution
#ifdef _OPENMP
this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId, lock_array);
#else
this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId);
#endif
}
}
#ifdef _OPENMP
for (int i = 0; i < A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
// Prints informations about the current time
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b,
ModelPart &rModelPart)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//provide physical data as needed
if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
{
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints informations about the current time
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
Timer::Start("Build");
/* boost::timer c_build_time; */
///////////////////////////////// ALL NODAL /////////////////////////////////
//BuildNodally(pScheme, rModelPart, A, b);
///////////////////////////////// ALL NODAL /////////////////////////////////
// /////////////////////// NODAL + ELEMENTAL LAPLACIAN ///////////////////////
//BuildNodallyUnlessLaplacian(pScheme, rModelPart, A, b);
//Build(pScheme, rModelPart, A, b);
// /////////////////////// NODAL + ELEMENTAL LAPLACIAN ///////////////////////
//////////////// NODAL + ELEMENTAL VOLUMETRIC STABILIZED TERMS////////////////
//BuildNodallyNoVolumetricStabilizedTerms(pScheme, rModelPart, A, b);
//Build(pScheme, rModelPart, A, b);
// /////////////////////// NODAL + ELEMENTAL LAPLACIAN ///////////////////////
/////////////////////// NODAL + ELEMENTAL STABILIZATION //////////////////////
// BuildNodallyNotStabilized(pScheme, rModelPart, A, b);
// Build(pScheme, rModelPart, A, b);
BuildAll(pScheme, rModelPart, A, b);
/////////////////////// NODAL + ELEMENTAL STABILIZATION //////////////////////
//////////////////////// ALL ELEMENTAL (FOR HYBRID) //////////////////////////
//Build(pScheme, rModelPart, A, b);
//////////////////////// ALL ELEMENTAL (FOR HYBRID) //////////////////////////
Timer::Stop("Build");
// ApplyPointLoads(pScheme,rModelPart,b);
// Does nothing...dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
/* const double start_solve = OpenMPUtils::GetCurrentTime(); */
Timer::Start("Solve");
/* boost::timer c_solve_time; */
SystemSolveWithPhysics(A, Dx, b, rModelPart);
/* std::cout << "CONTINUITY EQ: solve_time : " << c_solve_time.elapsed() << std::endl; */
Timer::Stop("Solve");
/* const double stop_solve = OpenMPUtils::GetCurrentTime(); */
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart &r_model_part,
TSystemMatrixType &A,
TSystemVectorType &b) override
{
KRATOS_TRY
if (!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType &pElements = r_model_part.Elements();
// //getting the array of the conditions
// ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
//create a partition of the element array
int number_of_threads = OpenMPUtils::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
std::vector<omp_lock_t> lock_array(A.size1());
for (int i = 0; i < A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel() > 0)
{
KRATOS_WATCH(number_of_threads);
KRATOS_WATCH(element_partition);
}
double start_prod = OpenMPUtils::GetCurrentTime();
#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo &CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(PRESSURE);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
//(*it)->InitializeNonLinearIteration(CurrentProcessInfo);
(*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
Geometry<Node<3>> &geom = (*it)->GetGeometry();
if (EquationId.size() != geom.size())
EquationId.resize(geom.size(), false);
for (unsigned int i = 0; i < geom.size(); i++)
EquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();
//assemble the elemental contribution
#ifdef _OPENMP
this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
#else
this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
if (this->GetEchoLevel() > 0)
{
double stop_prod = OpenMPUtils::GetCurrentTime();
std::cout << "parallel building time: " << stop_prod - start_prod << std::endl;
}
#ifdef _OPENMP
for (int i = 0; i < A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType &pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
// #pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
// ConditionsArrayType& pConditions = rModelPart.Conditions();
// const int nconditions = static_cast<int>(pConditions.size());
// #pragma omp parallel for firstprivate(nconditions, ElementalDofList)
// for (int i = 0; i < nconditions; i++)
// {
// typename ConditionsArrayType::iterator it = pConditions.begin() + i;
// const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// // gets list of Dof involved on every element
// pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
// dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
// }
//here we do a reduction in a tree so to have everything on thread 0
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5 * static_cast<double>(old_max));
}
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back((*it));
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an execption if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef _OPENMP
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
if (BaseType::GetCalculateReactionsFlag())
{
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl
<< "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart &rModelPart) override
{
// Set equation id for degrees of freedom
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed one are at the end (in opposite order).
//
// that means that if the EquationId is greater than "mEquationSystemSize"
// the pointed degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType &pA,
TSystemVectorPointerType &pDx,
TSystemVectorPointerType &pb,
ModelPart &rModelPart) override
{
KRATOS_TRY
/* boost::timer c_contruct_matrix; */
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType &A = *pA;
TSystemVectorType &Dx = *pDx;
TSystemVectorType &b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
if (BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
}
/* std::cout << "CONTINUITY EQ: contruct_matrix : " << c_contruct_matrix.elapsed() << std::endl; */
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart &rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void Assemble(
TSystemMatrixType &A,
TSystemVectorType &b,
const LocalSystemMatrixType &LHS_Contribution,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
,
std::vector<omp_lock_t> &lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
//**************************************************************************
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType &A,
ModelPart &rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
const std::size_t equation_size = BaseType::mEquationSystemSize;
std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
indices[iii].reserve(40);
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel firstprivate(ids)
{
// The process info
ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo();
// We repeat the same declaration for each thead
std::vector<std::unordered_set<std::size_t>> temp_indexes(equation_size);
#pragma omp for
for (int index = 0; index < static_cast<int>(equation_size); ++index)
temp_indexes[index].reserve(30);
// Getting the size of the array of elements from the model
const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
// Element initial iterator
const auto el_begin = rModelPart.ElementsBegin();
// We iterate over the elements
#pragma omp for schedule(guided, 512) nowait
for (int i_elem = 0; i_elem < number_of_elements; ++i_elem)
{
auto it_elem = el_begin + i_elem;
pScheme->EquationId(*(it_elem.base()), ids, r_current_process_info);
for (auto &id_i : ids)
{
if (id_i < BaseType::mEquationSystemSize)
{
auto &row_indices = temp_indexes[id_i];
for (auto &id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Getting the size of the array of the conditions
const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
// Condition initial iterator
const auto cond_begin = rModelPart.ConditionsBegin();
// We iterate over the conditions
#pragma omp for schedule(guided, 512) nowait
for (int i_cond = 0; i_cond < number_of_conditions; ++i_cond)
{
auto it_cond = cond_begin + i_cond;
pScheme->Condition_EquationId(*(it_cond.base()), ids, r_current_process_info);
for (auto &id_i : ids)
{
if (id_i < BaseType::mEquationSystemSize)
{
auto &row_indices = temp_indexes[id_i];
for (auto &id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Merging all the temporal indexes
#pragma omp critical
{
for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i)
{
indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
void AssembleLHS(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector<omp_lock_t> mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate)
{
i++;
}
if (i == endit)
{
v.push_back(candidate);
}
}
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int> &partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++)
partitions[i] = partitions[i - 1] + partition_size;
}
void AssembleRHS(
TSystemVectorType &b,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
int j_global = EquationId[j_local];
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolverContinuity */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
main.c | //====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "define.c"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "BenchmarksUtil.h"
#include <sys/time.h>
#define ERROR_THRESHOLD 0.05
int compareResults(fp *image, fp *image_cpu, int Ne) {
int i, fail;
fail = 0;
for (i = 0; i < Ne; i++) {
if (percentDiff(image[i], image_cpu[i]) > ERROR_THRESHOLD) {
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
ERROR_THRESHOLD, fail);
return fail;
}
//====================================================================================================100
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
//====================================================================================================100
int main(int argc, char *argv[]) {
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
double t_start, t_end, t_gpu, t_cpu;
time0 = get_time();
// inputs image, input paramenters
fp *image_ori; // originalinput image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// inputs image, input paramenters
fp *image, *image_cpu; // input image
int Nr, Nc; // IMAGE nbr of rows/cols/elements
int Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1, r2, c1, c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// ROI statistics
fp meanROI, varROI, q0sqr; // local region statistics
// surrounding pixel indicies
int *iN, *iS, *jE, *jW;
// center pixel value
fp Jc;
// directional derivatives
fp *dN, *dS, *dW, *dE;
// calculation variables
fp tmp, sum, sum2;
fp G2, L, num, den, qsqr, D;
// diffusion coefficient
fp *c;
fp cN, cS, cW, cE;
// counters
int iter; // primary loop
int i, j; // image row/col
int k; // image single index
// number of threads
int threads;
time1 = get_time();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if (argc != 6) {
printf("ERROR: wrong number of arguments\n");
return 0;
} else {
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
threads = atoi(argv[5]);
}
// omp_set_num_threads(threads);
// printf("THREAD %d\n", omp_get_thread_num());
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
time2 = get_time();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp *)malloc(sizeof(fp) * image_ori_elem);
read_graphics("../input/image.pgm", image_ori, image_ori_rows, image_ori_cols,
1);
time3 = get_time();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr * Nc;
image = (fp *)malloc(sizeof(fp) * Ne);
image_cpu = (fp *)malloc(sizeof(fp) * Ne);
resize(image_ori, image_ori_rows, image_ori_cols, image, image_cpu, Nr, Nc,
1);
time4 = get_time();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2 - r1 + 1) * (c2 - c1 + 1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
iN = malloc(sizeof(int *) * Nr); // north surrounding element
iS = malloc(sizeof(int *) * Nr); // south surrounding element
jW = malloc(sizeof(int *) * Nc); // west surrounding element
jE = malloc(sizeof(int *) * Nc); // east surrounding element
// allocate variables for directional derivatives
dN = malloc(sizeof(fp) * Ne); // north direction derivative
dS = malloc(sizeof(fp) * Ne); // south direction derivative
dW = malloc(sizeof(fp) * Ne); // west direction derivative
dE = malloc(sizeof(fp) * Ne); // east direction derivative
// allocate variable for diffusion coefficient
c = malloc(sizeof(fp) * Ne); // diffusion coefficient
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
// #pragma omp parallel
for (i = 0; i < Nr; i++) {
iN[i] = i - 1; // holds index of IMAGE row above
iS[i] = i + 1; // holds index of IMAGE row below
}
// #pragma omp parallel
for (j = 0; j < Nc; j++) {
jW[j] = j - 1; // holds index of IMAGE column on the left
jE[j] = j + 1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of
// IMAGE
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr - 1] = Nr - 1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc - 1] = Nc - 1; // changes IMAGE rightmost column index from Nc to Nc-1
time5 = get_time();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
// #pragma omp parallel
for (i = 0; i < Ne; i++) { // do for the number of elements in input IMAGE
image[i] = exp(image[i] /
255); // exponentiate input IMAGE and copy to output image
image_cpu[i] =
exp(image_cpu[i] /
255); // exponentiate input IMAGE and copy to output image
}
time6 = get_time();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// primary loop
// CPU
t_start = rtclock();
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// ROI statistics for entire ROI (single number for ROI)
sum = 0;
sum2 = 0;
for (i = r1; i <= r2; i++) { // do for the range of rows in ROI
for (j = c1; j <= c2; j++) { // do for the range of columns in ROI
tmp = image_cpu[i + Nr * j]; // get coresponding value in IMAGE
sum += tmp; // take corresponding value and add to sum
sum2 += tmp * tmp; // take square of corresponding value and add to sum2
}
}
meanROI = sum / NeROI; // gets mean (average) value of element in ROI
varROI = (sum2 / NeROI) - meanROI * meanROI; // gets variance of ROI
q0sqr = varROI / (meanROI * meanROI); // gets standard deviation of ROI
// directional derivatives, ICOV, diffusion coefficent
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index/pixel
k = i + Nr * j; // get position of current element
Jc = image_cpu[k]; // get value of the current element
// directional derivates (every element of IMAGE)
dN[k] = image_cpu[iN[i] + Nr * j] - Jc; // north direction derivative
dS[k] = image_cpu[iS[i] + Nr * j] - Jc; // south direction derivative
dW[k] = image_cpu[i + Nr * jW[j]] - Jc; // west direction derivative
dE[k] = image_cpu[i + Nr * jE[j]] - Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
G2 = (dN[k] * dN[k] + dS[k] * dS[k] // gradient (based on derivatives)
+ dW[k] * dW[k] + dE[k] * dE[k]) /
(Jc * Jc);
// normalized discrete laplacian (equ 54)
L = (dN[k] + dS[k] + dW[k] + dE[k]) /
Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
num = (0.5 * G2) -
((1.0 / 16.0) * (L * L)); // num (based on gradient and laplacian)
den = 1 + (.25 * L); // den (based on laplacian)
qsqr = num / (den * den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
den = (qsqr - q0sqr) /
(q0sqr * (1 + q0sqr)); // den (based on qsqr and q0sqr)
c[k] = 1.0 / (1.0 + den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (c[k] < 0) // if diffusion coefficient < 0
{
c[k] = 0; // ... set to 0
} else if (c[k] > 1) // if diffusion coefficient > 1
{
c[k] = 1; // ... set to 1
}
}
}
// divergence & image update
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index
k = i + Nr * j; // get position of current element
// diffusion coefficent
cN = c[k]; // north diffusion coefficient
cS = c[iS[i] + Nr * j]; // south diffusion coefficient
cW = c[k]; // west diffusion coefficient
cE = c[i + Nr * jE[j]]; // east diffusion coefficient
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // divergence
// image update (equ 61) (every element of IMAGE)
image_cpu[k] =
image_cpu[k] +
0.25 * lambda *
D; // updates image (based on input time step and divergence)
}
}
}
t_end = rtclock();
t_cpu = t_end - t_start;
// GPU
t_start = rtclock();
#pragma omp target map( \
to : iN[ : Nr], iS[ : Nr], \
jW[ : Nc], jE[ : Nc]) map( \
tofrom : dN[ : Ne], \
dS[ : Ne], \
dW[ : Ne], \
dE[ : Ne], \
c[ : Ne], image[ : Ne]) \
device(DEVICE_ID)
{
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// ROI statistics for entire ROI (single number for ROI)
sum = 0;
sum2 = 0;
for (i = r1; i <= r2; i++) { // do for the range of rows in ROI
for (j = c1; j <= c2; j++) { // do for the range of columns in ROI
tmp = image[i + Nr * j]; // get coresponding value in IMAGE
sum += tmp; // take corresponding value and add to sum
sum2 +=
tmp * tmp; // take square of corresponding value and add to sum2
}
}
meanROI = sum / NeROI; // gets mean (average) value of element in ROI
varROI = (sum2 / NeROI) - meanROI * meanROI; // gets variance of ROI
q0sqr = varROI / (meanROI * meanROI); // gets standard deviation of ROI
// directional derivatives, ICOV, diffusion coefficent
// #pragma omp target device(DEVICE_ID)
// #pragma omp target map(to: iN[:Nr], iS[:Nr], jW[:Nc], jE[:Nc], image[:Ne] ) \
// map(tofrom: dN[:Ne], dS[:Ne], dW[:Ne], dE[:Ne], c[:Ne])
// {
#pragma omp parallel for collapse(1)
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index/pixel
k = i + Nr * j; // get position of current element
Jc = image[k]; // get value of the current element
// directional derivates (every element of IMAGE)
dN[k] = image[iN[i] + Nr * j] - Jc; // north direction derivative
dS[k] = image[iS[i] + Nr * j] - Jc; // south direction derivative
dW[k] = image[i + Nr * jW[j]] - Jc; // west direction derivative
dE[k] = image[i + Nr * jE[j]] - Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
G2 = (dN[k] * dN[k] + dS[k] * dS[k] // gradient (based on derivatives)
+ dW[k] * dW[k] + dE[k] * dE[k]) /
(Jc * Jc);
// normalized discrete laplacian (equ 54)
L = (dN[k] + dS[k] + dW[k] + dE[k]) /
Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
num = (0.5 * G2) - ((1.0 / 16.0) *
(L * L)); // num (based on gradient and laplacian)
den = 1 + (.25 * L); // den (based on laplacian)
qsqr = num / (den * den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
den = (qsqr - q0sqr) /
(q0sqr * (1 + q0sqr)); // den (based on qsqr and q0sqr)
c[k] = 1.0 / (1.0 + den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (c[k] < 0) // if diffusion coefficient < 0
{
c[k] = 0; // ... set to 0
} else if (c[k] > 1) // if diffusion coefficient > 1
{
c[k] = 1; // ... set to 1
}
}
}
// }
// divergence & image update
// #pragma omp target device(DEVICE_ID)
// #pragma omp target map(to: c[:Ne], iS[:Nr], jE[:Nc], dN[:Ne], dS[:Ne], dW[:Ne], dE[:Ne] ) \
map(tofrom: image[:Ne])
// {
#pragma omp parallel for collapse(1)
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index
k = i + Nr * j; // get position of current element
// diffusion coefficent
cN = c[k]; // north diffusion coefficient
cS = c[iS[i] + Nr * j]; // south diffusion coefficient
cW = c[k]; // west diffusion coefficient
cE = c[i + Nr * jE[j]]; // east diffusion coefficient
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // divergence
// image update (equ 61) (every element of IMAGE)
image[k] =
image[k] +
0.25 * lambda *
D; // updates image (based on input time step and divergence)
}
}
// }
}
}
t_end = rtclock();
t_gpu = t_end - t_start;
// printf("\n");
time7 = get_time();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
// #pragma omp parallel
for (i = 0; i < Ne; i++) { // do for the number of elements in IMAGE
image[i] = log(image[i]) * 255; // take logarithm of image, log compress
image_cpu[i] =
log(image_cpu[i]) * 255; // take logarithm of image, log compress
}
time8 = get_time();
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics("image_out.pgm", image, Nr, Nc, 1, 255);
time9 = get_time();
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(iN);
free(iS);
free(jW);
free(jE); // deallocate surrounding pixel memory
free(dN);
free(dS);
free(dW);
free(dE); // deallocate directional derivative memory
free(c); // deallocate diffusion coefficient memory
time10 = get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f : SETUP VARIABLES\n",
(float)(time1 - time0) / 1000000,
(float)(time1 - time0) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : READ COMMAND LINE PARAMETERS\n",
(float)(time2 - time1) / 1000000,
(float)(time2 - time1) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : READ IMAGE FROM FILE\n",
(float)(time3 - time2) / 1000000,
(float)(time3 - time2) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : RESIZE IMAGE\n", (float)(time4 - time3) / 1000000,
(float)(time4 - time3) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : SETUP, MEMORY ALLOCATION\n",
(float)(time5 - time4) / 1000000,
(float)(time5 - time4) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : EXTRACT IMAGE\n", (float)(time6 - time5) / 1000000,
(float)(time6 - time5) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : COMPUTE\n", (float)(time7 - time6) / 1000000,
(float)(time7 - time6) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : COMPRESS IMAGE\n",
(float)(time8 - time7) / 1000000,
(float)(time8 - time7) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : SAVE IMAGE INTO FILE\n",
(float)(time9 - time8) / 1000000,
(float)(time9 - time8) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f : FREE MEMORY\n", (float)(time10 - time9) / 1000000,
(float)(time10 - time9) / (float)(time10 - time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float)(time10 - time0) / 1000000);
printf("\n\n");
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_gpu);
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_cpu);
compareResults(image, image_cpu, Ne);
free(image);
free(image_cpu);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
}
|
aux_propagate.h | #ifndef AUX_PROPAGATE_H
#define AUX_PROPAGATE_H
#include <cvpp/containers/matrix.h>
#include <cvpp/algorithms/icp/libicp.h>
#include "class_map.h"
#include "class_group.h"
using namespace cvpp;
/// Propagate objects from Next to Main
void propagateObjects( Map& prev , Map& next , Map& main ,
const int& thrctr , const double& thrfit )
{
Timer t;
SeqMatd M1p( main.size() ) , M1n( main.size() );
auto Mobs = MatZEROSd( main.size() );
/// Calculate movement
t.ptick( "Calculating Movement" );
#pragma omp parallel for
forLOOPi( prev.size() )
{
if( prev[i].size() >= thrctr && next[i].size() >= thrctr &&
0.8 * prev[i].size() < next[i].size() )
{
Matd Mp,Mn;
// if( next[i].size() < 4 * thrctr )
// {
// Mp = prev[i].getP();
// Mn = next[i].getP();
// }
// else
{
Mp = prev[i].getM();
Mn = next[i].getM();
}
LibICPd icp( Mn );
icp.fit( Mp , main[i].R , main[i].t , thrfit );
Mobs(i) = true;
}
}
/// Move main groups
t.ptick( "Moving Main Groups" );
forLOOPi( main.size() )
{
M1p[i] = main[i].getM();
M1n[i] = icp_calc( M1p[i] , main[i].R , main[i].t );
main[i].updM( M1n[i] );
}
// Remove main clusters based on next
t.ptick( "Removing Main Clusters" );
forLOOPi( main.size() )
{
if( main[i].size() >= thrctr && next[i].size() >= thrctr )
{
Matd Mm = main[i].getM();
Matd Mn = next[i].getM();
SSeqi idxs; SSeqd dsts; KDtreed tree( Mn );
tree.knnSearch( Mm , 1 , idxs , dsts );
Veci rems;
forLOOPj( idxs.size() )
if( dsts[j][0] < 0.5 * main.rad * main.rad ) rems.insert(j);
rems.update(); rems.mat().SortRows().FlipRows();
forLOOPj( rems.n() ) main[i].del( rems[j] );
}
}
/// Copy next clusters to main
t.ptick( "Copying Next Clusters" );
forLOOPij( main.size() , next[i].size() )
main[i].push_back( next[i][j] );
forLOOPii( main.size() , next.size() )
main.objs.push_back( next[i] );
// Recalculate main movement
t.ptick( "Recalculating Main Movement" );
#pragma omp parallel for
forLOOPi( M1p.size() )
{
// main[i].v = main[i].calcMov();
if( Mobs(i) )
{
Matd M2 = main[i].getM();
if( M1p[i].r() >= thrctr && M2.r() >= thrctr )
{
LibICPd icp( M2 );
icp.fit( M1p[i] , main[i].R , main[i].t , thrfit );
Matd d = ( M2 - M1p[i] ).meanRows();
double chg = ( main[i].v - d ).rsqsum();
if( chg > 0.9 )
{
main[i].t.setVal(0);
main[i].R.setIdentity();
}
else
{
if( main[i].v(0) == 0.0 ) main[i].v = d;
else main[i].v = ( main[i].v + d ) / 2.0;
if( d.rsqsum() < 0.1 ) main[i].v.setVal(0);
main[i].t = main[i].v; main[i].R.setIdentity();
}
}
}
else
{
Matd d = ( M1n[i] - M1p[i] ).meanRows();
if( main[i].v(0) == 0.0 ) main[i].v = d;
else main[i].v = ( main[i].v + d ) / 2.0;
main[i].t = main[i].v; main[i].R.setIdentity();
}
// main[i].t(2) = 0.0; // No Z motion
main[i].savePos();
}
forLOOPi( main.size() ) main[i].t.print();
// // Update weights
// t.ptick( "Updating Weights" );
// forLOOPi( Mobs.r() )
// {
// forLOOPj( main[i].size() )
// main[i][j].S -= main[i].C;
// if( !Mobs(i) )
// {
// forLOOPj( main[i].size() )
// main[i][j].wgt *= 0.95;
// }
// else
// {
// forLOOPj( main[i].size() )
// main[i][j].wgt = 1.0;
// main[i].C.setVal( 0.0 );
// }
// forLOOPj( main[i].size() )
// main[i][j].S += main[i].C;
// }
// // Removing Uncertain Groups
// t.ptick( "Removing Uncertain Groups" );
// forLOOPi( main.size() )
// {
// double w = 0;
// forLOOPj( main[i].size() ) w += main[i][j].wgt;
// w /= main[i].size(); if( w > 0 && w < 0.25 ) main[i] = Group();
// }
t.ptick( "Done" );
}
#endif
|
GB_unaryop__lnot_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_int32
// op(A') function: GB_tran__lnot_uint32_int32
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint32_int32
(
uint32_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp_sections.c | /*
OpenMP "sections" clause example
Jim Teresco, CS 338, Williams College, CS 341, Mount Holyoke College
Mon Feb 24 22:30:57 EST 2003
Updated for CSIS-335, Siena College, Fall 2021
*/
#include <stdio.h>
#include <omp.h>
int main(int argc, char *argv[]) {
int thread_num;
#pragma omp parallel sections private(thread_num)
{
#pragma omp section
{
thread_num = omp_get_thread_num();
printf("In this section, thread_num=%d\n", thread_num);
}
#pragma omp section
{
thread_num = omp_get_thread_num();
printf("But in this section, thread_num=%d\n", thread_num);
}
#pragma omp section
{
thread_num = omp_get_thread_num();
printf("And in this section, thread_num=%d\n", thread_num);
}
#pragma omp section
{
thread_num = omp_get_thread_num();
printf("Over here in this section, thread_num=%d\n", thread_num);
}
#pragma omp section
{
thread_num = omp_get_thread_num();
printf("Yet another section, with thread_num=%d\n", thread_num);
}
#pragma omp section
{
thread_num = omp_get_thread_num();
printf("Our last section, thread_num=%d\n", thread_num);
}
}
return 0;
}
|
GB_unop__lnot_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_uint64_uint64)
// op(A') function: GB (_unop_tran__lnot_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__lnot_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = !(z != 0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__lnot_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
primes.c | #include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <offload.h>
const int a = 1;
const int b = 3000000;
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
/*
* is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
* This function uses trial division primality test.
*/
int is_prime_number(int n)
{
int limit = sqrt(n) + 1;
for (int i = 2; i <= limit; i++) {
if (n % i == 0)
return 0;
}
return (n > 1) ? 1 : 0;
}
int count_prime_numbers(int a, int b)
{
int nprimes = 0;
/* Count '2' as a prime number */
if (a <= 2) {
nprimes = 1;
a = 2;
}
/* Shift 'a' to odd number */
if (a % 2 == 0)
a++;
/* Loop over odd numbers: a, a + 2, a + 4, ... , b */
for (int i = a; i <= b; i += 2) {
if (is_prime_number(i))
nprimes++;
}
return nprimes;
}
double run_host_serial()
{
double t = wtime();
int n = count_prime_numbers(a, b);
t = wtime() - t;
printf("Result (host serial): %d\n", n);
return t;
}
int count_prime_numbers_omp(int a, int b)
{
int nprimes = 0;
/* Count '2' as a prime number */
if (a <= 2) {
nprimes = 1;
a = 2;
}
/* Shift 'a' to odd number */
if (a % 2 == 0)
a++;
#pragma omp parallel
{
/* Loop over odd numbers: a, a + 2, a + 4, ... , b */
#pragma omp for schedule(dynamic, 100) reduction(+:nprimes)
for (int i = a; i <= b; i += 2) {
if (is_prime_number(i))
nprimes++;
}
}
return nprimes;
}
double run_host_parallel()
{
double t = wtime();
int n = count_prime_numbers_omp(a, b);
t = wtime() - t;
printf("Result (host parallel): %d\n", n);
return t;
}
int main(int argc, char **argv)
{
printf("Count prime numbers in [%d, %d]\n", a, b);
double thost_serial = run_host_serial();
double thost_par = run_host_parallel();
printf("Execution time (host serial): %.6f\n", thost_serial);
printf("Execution time (host parallel): %.6f\n", thost_par);
printf("Speedup host_serial/host_omp: %.2f\n", thost_serial / thost_par);
return 0;
}
|
pp_collision.c | /* Copyright (C) 2017 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include "imag_self_energy_with_g.h"
#include "interaction.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "pp_collision.h"
#include "triplet.h"
#include "triplet_iw.h"
#include "lapack_wrapper.h"
static void get_collision(double *ise,
const long num_band0,
const long num_band,
const long num_temps,
const double *temperatures,
const double *g,
const char *g_zero,
const double *frequencies,
const lapack_complex_double *eigenvectors,
const long triplet[3],
const long triplet_weight,
const ConstBZGrid *bzgrid,
const double *fc3,
const long is_compact_fc3,
const double *shortest_vectors,
const long svecs_dims[3],
const long *multiplicity,
const double *masses,
const long *p2s_map,
const long *s2p_map,
const long *band_indices,
const long symmetrize_fc3_q,
const double cutoff_frequency,
const long openmp_per_triplets);
static void finalize_ise(double *imag_self_energy,
const double *ise,
const long (*bz_grid_address)[3],
const long (*triplets)[3],
const long num_triplets,
const long num_temps,
const long num_band0,
const long is_NU);
void ppc_get_pp_collision(double *imag_self_energy,
const long relative_grid_address[24][4][3], /* thm */
const double *frequencies,
const lapack_complex_double *eigenvectors,
const long (*triplets)[3],
const long num_triplets,
const long *triplet_weights,
const ConstBZGrid *bzgrid,
const double *fc3,
const long is_compact_fc3,
const double *shortest_vectors,
const long svecs_dims[3],
const long *multiplicity,
const double *masses,
const long *p2s_map,
const long *s2p_map,
const Larray *band_indices,
const Darray *temperatures,
const long is_NU,
const long symmetrize_fc3_q,
const double cutoff_frequency)
{
long i;
long num_band, num_band0, num_band_prod, num_temps;
long openmp_per_triplets;
double *ise, *freqs_at_gp, *g;
char *g_zero;
long tp_relative_grid_address[2][24][4][3];
ise = NULL;
freqs_at_gp = NULL;
g = NULL;
g_zero = NULL;
num_band0 = band_indices->dims[0];
num_band = svecs_dims[1] * 3;
num_band_prod = num_band0 * num_band * num_band;
num_temps = temperatures->dims[0];
ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
for (i = 0; i < num_band0; i++) {
freqs_at_gp[i] = frequencies[triplets[0][0] * num_band
+ band_indices->data[i]];
}
if (num_triplets > num_band) {
openmp_per_triplets = 1;
} else {
openmp_per_triplets = 0;
}
tpl_set_relative_grid_address(tp_relative_grid_address,
relative_grid_address,
2);
#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
for (i = 0; i < num_triplets; i++) {
g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
g_zero = (char*)malloc(sizeof(char) * num_band_prod);
tpi_get_integration_weight(g,
g_zero,
freqs_at_gp, /* used as f0 */
num_band0,
tp_relative_grid_address,
triplets[i],
1,
bzgrid,
frequencies, /* used as f1 */
num_band,
frequencies, /* used as f2 */
num_band,
2,
1 - openmp_per_triplets);
get_collision(ise + i * num_temps * num_band0,
num_band0,
num_band,
num_temps,
temperatures->data,
g,
g_zero,
frequencies,
eigenvectors,
triplets[i],
triplet_weights[i],
bzgrid,
fc3,
is_compact_fc3,
shortest_vectors,
svecs_dims,
multiplicity,
masses,
p2s_map,
s2p_map,
band_indices->data,
symmetrize_fc3_q,
cutoff_frequency,
openmp_per_triplets);
free(g_zero);
g_zero = NULL;
free(g);
g = NULL;
}
finalize_ise(imag_self_energy,
ise,
bzgrid->addresses,
triplets,
num_triplets,
num_temps,
num_band0,
is_NU);
free(freqs_at_gp);
freqs_at_gp = NULL;
free(ise);
ise = NULL;
}
void ppc_get_pp_collision_with_sigma(
double *imag_self_energy,
const double sigma,
const double sigma_cutoff,
const double *frequencies,
const lapack_complex_double *eigenvectors,
const long (*triplets)[3],
const long num_triplets,
const long *triplet_weights,
const ConstBZGrid *bzgrid,
const double *fc3,
const long is_compact_fc3,
const double *shortest_vectors,
const long svecs_dims[3],
const long *multiplicity,
const double *masses,
const long *p2s_map,
const long *s2p_map,
const Larray *band_indices,
const Darray *temperatures,
const long is_NU,
const long symmetrize_fc3_q,
const double cutoff_frequency)
{
long i;
long num_band, num_band0, num_band_prod, num_temps;
long openmp_per_triplets, const_adrs_shift;
double cutoff;
double *ise, *freqs_at_gp, *g;
char *g_zero;
ise = NULL;
freqs_at_gp = NULL;
g = NULL;
g_zero = NULL;
num_band0 = band_indices->dims[0];
num_band = svecs_dims[1] * 3;
num_band_prod = num_band0 * num_band * num_band;
num_temps = temperatures->dims[0];
const_adrs_shift = num_band_prod;
ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
for (i = 0; i < num_band0; i++) {
freqs_at_gp[i] = frequencies[triplets[0][0] * num_band +
band_indices->data[i]];
}
if (num_triplets > num_band) {
openmp_per_triplets = 1;
} else {
openmp_per_triplets = 0;
}
cutoff = sigma * sigma_cutoff;
#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
for (i = 0; i < num_triplets; i++) {
g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
g_zero = (char*)malloc(sizeof(char) * num_band_prod);
tpi_get_integration_weight_with_sigma(g,
g_zero,
sigma,
cutoff,
freqs_at_gp,
num_band0,
triplets[i],
const_adrs_shift,
frequencies,
num_band,
2,
0);
get_collision(ise + i * num_temps * num_band0,
num_band0,
num_band,
num_temps,
temperatures->data,
g,
g_zero,
frequencies,
eigenvectors,
triplets[i],
triplet_weights[i],
bzgrid,
fc3,
is_compact_fc3,
shortest_vectors,
svecs_dims,
multiplicity,
masses,
p2s_map,
s2p_map,
band_indices->data,
symmetrize_fc3_q,
cutoff_frequency,
openmp_per_triplets);
free(g_zero);
g_zero = NULL;
free(g);
g = NULL;
}
finalize_ise(imag_self_energy,
ise,
bzgrid->addresses,
triplets,
num_triplets,
num_temps,
num_band0,
is_NU);
free(freqs_at_gp);
freqs_at_gp = NULL;
free(ise);
ise = NULL;
}
static void get_collision(double *ise,
const long num_band0,
const long num_band,
const long num_temps,
const double *temperatures,
const double *g,
const char *g_zero,
const double *frequencies,
const lapack_complex_double *eigenvectors,
const long triplet[3],
const long triplet_weight,
const ConstBZGrid *bzgrid,
const double *fc3,
const long is_compact_fc3,
const double *shortest_vectors,
const long svecs_dims[3],
const long *multiplicity,
const double *masses,
const long *p2s_map,
const long *s2p_map,
const long *band_indices,
const long symmetrize_fc3_q,
const double cutoff_frequency,
const long openmp_per_triplets)
{
long i;
long num_band_prod, num_g_pos;
double *fc3_normal_squared;
long (*g_pos)[4];
fc3_normal_squared = NULL;
g_pos = NULL;
num_band_prod = num_band0 * num_band * num_band;
fc3_normal_squared = (double*)malloc(sizeof(double) * num_band_prod);
g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);
for (i = 0; i < num_band_prod; i++) {
fc3_normal_squared[i] = 0;
}
num_g_pos = ise_set_g_pos(g_pos,
num_band0,
num_band,
g_zero);
itr_get_interaction_at_triplet(
fc3_normal_squared,
num_band0,
num_band,
g_pos,
num_g_pos,
frequencies,
eigenvectors,
triplet,
bzgrid,
fc3,
is_compact_fc3,
shortest_vectors,
svecs_dims,
multiplicity,
masses,
p2s_map,
s2p_map,
band_indices,
symmetrize_fc3_q,
cutoff_frequency,
0,
0,
1 - openmp_per_triplets);
ise_imag_self_energy_at_triplet(
ise,
num_band0,
num_band,
fc3_normal_squared,
frequencies,
triplet,
triplet_weight,
g,
g + num_band_prod,
g_pos,
num_g_pos,
temperatures,
num_temps,
cutoff_frequency,
1 - openmp_per_triplets,
0);
free(fc3_normal_squared);
fc3_normal_squared = NULL;
free(g_pos);
g_pos = NULL;
}
static void finalize_ise(double *imag_self_energy,
const double *ise,
const long (*bz_grid_addresses)[3],
const long (*triplets)[3],
const long num_triplets,
const long num_temps,
const long num_band0,
const long is_NU)
{
long i, j, k;
long is_N;
if (is_NU) {
for (i = 0; i < 2 * num_temps * num_band0; i++) {
imag_self_energy[i] = 0;
}
for (i = 0; i < num_triplets; i++) {
is_N = tpl_is_N(triplets[i], bz_grid_addresses);
for (j = 0; j < num_temps; j++) {
for (k = 0; k < num_band0; k++) {
if (is_N) {
imag_self_energy[j * num_band0 + k] +=
ise[i * num_temps * num_band0 + j * num_band0 + k];
} else {
imag_self_energy[num_temps * num_band0 + j * num_band0 + k] +=
ise[i * num_temps * num_band0 + j * num_band0 + k];
}
}
}
}
} else {
for (i = 0; i < num_temps * num_band0; i++) {
imag_self_energy[i] = 0;
}
for (i = 0; i < num_triplets; i++) {
for (j = 0; j < num_temps; j++) {
for (k = 0; k < num_band0; k++) {
imag_self_energy[j * num_band0 + k] +=
ise[i * num_temps * num_band0 + j * num_band0 + k];
}
}
}
}
}
|
ncpdq.c | /* $Header$ */
/* ncpdq -- netCDF pack, re-dimension, query */
/* Purpose: Pack, re-dimension, query single netCDF file and output to a single file */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
ncpdq -O -D 3 -a lat,lev,lon -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -a lon,lev,lat -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -a lon,time -x -v three_double_dmn ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -P all_new ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P all_xst ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P xst_new ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -M dbl_flt ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -M flt_dbl ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P upk ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -a lon,lat -g g21,g22 ~/nco/data/in_grp_3.nc ~/foo.nc
ncpdq -O -D 3 -g g1 -v v1 --union -G dude -p ~/nco/data in_grp.nc ~/foo.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <time.h> /* machine time */
#ifndef _MSC_VER
# include <unistd.h> /* POSIX stuff */
#endif
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
#ifdef I18N
# include <langinfo.h> /* nl_langinfo() */
# include <libintl.h> /* Internationalization i18n */
# include <locale.h> /* Locale setlocale() */
# define _(sng) gettext (sng)
# define gettext_noop(sng) (sng)
# define N_(sng) gettext_noop(sng)
#endif /* I18N */
/* Supply stub gettext() function in case i18n failed */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include <netcdf_par.h> /* Parallel netCDF definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
int
main(int argc,char **argv)
{
aed_sct *aed_lst_add_fst=NULL_CEWI;
aed_sct *aed_lst_scl_fct=NULL_CEWI;
char **dmn_rdr_lst_in=NULL_CEWI; /* Option a */
char **fl_lst_abb=NULL; /* Option n */
char **fl_lst_in=NULL_CEWI;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char **grp_lst_in=NULL_CEWI;
char *aux_arg[NC_MAX_DIMS];
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL_CEWI;
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */
char *nco_pck_map_sng=NULL_CEWI; /* [sng] Packing map Option M */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl; /* [sng] Local copy of system optarg */
char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
char add_fst_sng[]="add_offset"; /* [sng] Unidata standard string for add offset */
char scl_fct_sng[]="scale_factor"; /* [sng] Unidata standard string for scale factor */
char trv_pth[]="/"; /* [sng] Root path of traversal tree */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567Aa:CcD:d:Fg:G:hL:l:M:Oo:P:p:Rrt:v:UxZ-:";
cnk_sct cnk; /* [sct] Chunking structure */
cnv_sct *cnv; /* [sct] Convention structure */
#if defined(__cplusplus) || defined(PGI_CC)
ddra_info_sct ddra_info;
ddra_info.flg_ddra=False;
#else /* !__cplusplus */
ddra_info_sct ddra_info={.flg_ddra=False};
#endif /* !__cplusplus */
dmn_sct **dmn_rdr_trv=NULL; /* [sct] Dimension structures to be re-ordered (from global table) */
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */
int *in_id_arr;
int abb_arg_nbr=0;
int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int dmn_rdr_nbr=0; /* [nbr] Number of dimension to re-order */
int dmn_rdr_nbr_trv=0; /* [nbr] Number of dimension to re-order (from global table) */
int dmn_rdr_nbr_in=0; /* [nbr] Original number of dimension to re-order */
int fl_idx=int_CEWI;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx=int_CEWI;
int idx_rdr=int_CEWI;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int nco_pck_map=nco_pck_map_flt_sht; /* [enm] Packing map */
int nco_pck_plc=nco_pck_plc_nil; /* [enm] Packing policy */
int opt;
int out_id;
int ppc_nbr=0; /* [nbr] Number of PPC arguments */
int rcd=NC_NOERR; /* [rcd] Return code */
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int var_lst_in_nbr=0;
int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */
md5_sct *md5=NULL; /* [sct] MD5 configuration */
nco_bool *dmn_rvr_rdr=NULL; /* [flg] Reverse dimensions */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */
nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool IS_REORDER=False; /* Re-order mode */
nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
nco_bool flg_dmn_prc_usr_spc=False; /* [flg] Processed dimensions specified on command line */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out;
var_sct **var_prc;
var_sct **var_prc_out;
trv_tbl_sct *trv_tbl=NULL; /* [lst] Traversal table */
nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */
{"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */
{"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */
{"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */
{"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */
{"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */
{"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */
{"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */
{"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
{"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
{"help",no_argument,0,0},
{"hlp",no_argument,0,0},
{"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */
{"mrd",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */
{"multiple_record_dimension",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */
{"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"intersection",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */
{"nsx",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */
{"union",no_argument,0,0}, /* [flg] Select union of specified groups and variables */
{"unn",no_argument,0,0}, /* [flg] Select union of specified groups and variables */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */
{"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"upk",required_argument,0,0}, /* [enm] Unpacking convention to utilize */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"arrange",required_argument,0,'a'},
{"permute",required_argument,0,'a'},
{"reorder",required_argument,0,'a'},
{"rdr",required_argument,0,'a'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"gpe",required_argument,0,'G'}, /* [sng] Group Path Edit (GPE) */
{"grp",required_argument,0,'g'},
{"group",required_argument,0,'g'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"pack_map",required_argument,0,'M'},
{"pck_map",required_argument,0,'M'},
{"map",required_argument,0,'M'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"pack_policy",required_argument,0,'P'},
{"pck_plc",required_argument,0,'P'},
{"path",required_argument,0,'p'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"unpack",no_argument,0,'U'},
{"variable",required_argument,0,'v'},
{"auxiliary",required_argument,0,'X'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
/* Initialize traversal table */
trv_tbl_init(&trv_tbl);
/* Start timer and save command line */
ddra_info.tmr_flg=nco_tmr_srt;
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_mtd;
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
#ifdef ENABLE_MPI
/* MPI Initialization */
if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm);
MPI_Init(&argc,&argv);
MPI_Comm_size(mpi_cmm,&prc_nbr);
MPI_Comm_rank(mpi_cmm,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){
cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_csh_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */
if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */
if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */
if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */
if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
} /* endif "help" */
if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"mrd") || !strcmp(opt_crr,"multiple_record_dimension")) nco_mrd_cnv=nco_mrd_allow; /* [enm] Multiple Record Dimension convention */
if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){
ppc_arg[ppc_nbr]=(char *)strdup(optarg);
ppc_nbr++;
} /* endif "ppc" */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Create file in RAM */
if(!strcmp(opt_crr,"unn") || !strcmp(opt_crr,"union")) GRP_VAR_UNN=True;
if(!strcmp(opt_crr,"nsx") || !strcmp(opt_crr,"intersection")) GRP_VAR_UNN=False;
if(!strcmp(opt_crr,"upk")){ /* [enm] Unpacking convention to utilize */
nco_upk_cnv=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'a': /* Re-order dimensions */
flg_dmn_prc_usr_spc=True;
dmn_rdr_lst_in=nco_lst_prs_2D(optarg,",",&dmn_rdr_nbr_in);
dmn_rdr_nbr=dmn_rdr_nbr_in;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'G': /* Apply Group Path Editing (GPE) to output group */
/* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it
http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */
gpe=nco_gpe_prs_arg(optarg);
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case 'g': /* Copy group argument for later processing */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'M': /* Packing map */
nco_pck_map_sng=(char *)strdup(optarg);
nco_pck_map=nco_pck_map_get(nco_pck_map_sng);
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'P': /* Packing policy */
nco_pck_plc_sng=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'U': /* Unpacking switch */
nco_pck_plc_sng=(char *)strdup("upk");
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
aux_arg[aux_nbr]=(char *)strdup(optarg);
aux_nbr++;
MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */
(void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Set/report global chunk cache */
rcd+=nco_cnk_csh_ini(cnk_csh_byt);
/* Set re-order flag */
if(dmn_rdr_nbr > 0) IS_REORDER=True;
/* No re-order dimensions specified implies packing request */
if(dmn_rdr_nbr == 0){
if(nco_pck_plc == nco_pck_plc_nil) nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: DEBUG Packing map is %s and packing policy is %s\n",nco_prg_nm_get(),nco_pck_map_sng_get(nco_pck_map),nco_pck_plc_sng_get(nco_pck_plc));
} /* dmn_rdr_nbr != 0 */
/* From this point forward, assume ncpdq operator packs or re-orders, not both */
if(dmn_rdr_nbr > 0 && nco_pck_plc != nco_pck_plc_nil){
(void)fprintf(fp_stdout,"%s: ERROR %s does not support simultaneous dimension re-ordering (-a switch) and packing (-P switch).\nHINT: Invoke %s twice, once to re-order (with -a), and once to pack (with -P).\n",nco_prg_nm,nco_prg_nm,nco_prg_nm);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Get file format */
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */
(void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,xtr_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl);
/* Were all user-specified dimensions found? */
(void)nco_chk_dmn(lmt_nbr,flg_dne);
/* Create reversed dimension list */
if(dmn_rdr_nbr_in > 0){
dmn_rvr_rdr=(nco_bool *)nco_malloc(dmn_rdr_nbr_in*sizeof(nco_bool));
/* Is dimension to be reversed? i.e., does string begin with minus-sign '-'? */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){
if(dmn_rdr_lst_in[idx_rdr][0] == '-'){
dmn_rvr_rdr[idx_rdr]=True;
/* Strip-out '-': Copy string to new memory one past negative sign to avoid losing byte */
optarg_lcl=dmn_rdr_lst_in[idx_rdr];
dmn_rdr_lst_in[idx_rdr]=(char *)strdup(optarg_lcl+1L);
optarg_lcl=(char *)nco_free(optarg_lcl);
}else{
dmn_rvr_rdr[idx_rdr]=False;
} /* !'-' */
} /* !idx_rdr */
} /* !dmn_rdr_nbr_in */
/* Get number of variables, dimensions, and global attributes in file, file format */
(void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,(int *)NULL,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl);
/* Create list of dimensions to average(ncwa)/re-order(ncpdq) */
if(IS_REORDER) (void)nco_dmn_avg_mk(in_id,dmn_rdr_lst_in,dmn_rdr_nbr_in,flg_dmn_prc_usr_spc,False,trv_tbl,&dmn_rdr_trv,&dmn_rdr_nbr_trv);
/* Fill-in variable structure list for all extracted variables */
var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl);
/* Duplicate to output array */
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over variables */
/* Refresh var_out with dim_out data */
(void)nco_var_dmn_refresh(var_out,xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_map,nco_pck_plc,dmn_rdr_trv,dmn_rdr_nbr_trv,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl);
/* Store processed and fixed variables info into GTT */
(void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl);
/* We now have final list of variables to extract. Phew. */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Initialize, decode, and set PPC information */
if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl);
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* Initialize chunking from user-specified inputs */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk);
if(IS_REORDER){
dmn_sct **dmn_rdr=NULL; /* [sct] Dimension structures to be re-ordered */
/* "dmn_rdr" is only used for input to function nco_var_dmn_rdr_mtd(), that compares dimensions by short name;
this is because the input list of -a are dimension short names; group support is obtained combining with -g option;
on input it contains a list of dimension short names (in "dmn_rdr"), that together with input array "dmn_rvr_rdr"
of flags that determine if dimension at index dmn_rvr_rdr[index] is to be reversed; use cases:
in_grp_8.nc contains the dimensions /g1/lat, /g1/lon, /g2/lat, /g2/lon
ncpdq -O -v lat,lon -a -lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc
"dmn_rdr" contains names ["lat"], ["lon"], striped of '-' (minus) sign and dmn_rvr_rdr contains [True],[True ]
output is reversed /g1/lat, /g1/lon, /g2/lat, /g2/lon
ncpdq -O -v lat,lon -a lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc
"dmn_rdr" contains names ["lat"], ["lon"], and dmn_rvr_rdr contains [False],[True ]
output is reversed /g1/lon, /g2/lon */
/* Form list of re-ordering dimensions from extracted input dimensions */
dmn_rdr=(dmn_sct **)nco_malloc(dmn_rdr_nbr*sizeof(dmn_sct *));
/* Initialize re-ordering dimensions; initialize only short name */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){
dmn_rdr[idx_rdr]=(dmn_sct *)nco_malloc(sizeof(dmn_sct));
dmn_rdr[idx_rdr]->nm=(char *)strdup(dmn_rdr_lst_in[idx_rdr]);
dmn_rdr[idx_rdr]->nm_fll=NULL;
dmn_rdr[idx_rdr]->id=-1;
}
/* Determine and set new dimensionality in metadata of each re-ordered variable */
(void)nco_var_dmn_rdr_mtd_trv(trv_tbl,nbr_var_prc,var_prc,var_prc_out,nbr_var_fix,var_fix,dmn_rdr,dmn_rdr_nbr,dmn_rvr_rdr);
for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_in; idx_rdr++){
dmn_rdr[idx_rdr]->nm=(char *)nco_free(dmn_rdr[idx_rdr]->nm);
dmn_rdr[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr[idx_rdr]);
}
dmn_rdr=(dmn_sct **)nco_free(dmn_rdr);
} /* IS_REORDER */
/* Alter metadata for variables that will be packed */
if(nco_pck_plc != nco_pck_plc_nil){
if(nco_pck_plc != nco_pck_plc_upk){
/* Allocate attribute list container for maximum number of entries */
aed_lst_add_fst=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct));
aed_lst_scl_fct=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct));
} /* endif packing */
for(idx=0;idx<nbr_var_prc;idx++){
nco_pck_mtd(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc);
if(nco_pck_plc != nco_pck_plc_upk){
/* Use same copy of attribute name for all edits */
aed_lst_add_fst[idx].att_nm=add_fst_sng;
aed_lst_scl_fct[idx].att_nm=scl_fct_sng;
} /* endif packing */
} /* end loop over var_prc */
/* Transfer variable type to table. NB: Use processed variables set with new type. MUST be done before definition. */
(void)nco_var_typ_trv(nbr_var_prc,var_prc_out,trv_tbl);
} /* nco_pck_plc == nco_pck_plc_nil */
/* Define dimensions, extracted groups, variables, and attributes in output file. NB: record name is NULL */
(void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,True,False,nco_pck_plc,(char *)NULL,trv_tbl);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
/* Copy variable data for non-processed variables */
(void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl);
/* Close first input netCDF file */
nco_close(in_id);
/* Loop over input files (not currently used, fl_nbr == 1) */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,", local file is %s",fl_in);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n");
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
/* Timestamp end of metadata setup and disk layout */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_rgl;
#ifdef _OPENMP
#pragma omp parallel for private(idx,in_id) shared(aed_lst_add_fst,aed_lst_scl_fct,nco_dbg_lvl,dmn_rdr_nbr,gpe,in_id_arr,nbr_var_prc,nco_pck_map,nco_pck_plc,out_id,nco_prg_nm,rcd,var_prc,var_prc_out,nbr_dmn_fl,trv_tbl,IS_REORDER,fl_out_fmt)
#endif /* !_OPENMP */
/* Process all variables in current file */
for(idx=0;idx<nbr_var_prc;idx++){
char *grp_out_fll=NULL; /* [sng] Group name */
int grp_out_id; /* [ID] Group ID (output) */
int var_out_id; /* [ID] Variable ID (output) */
trv_sct *var_trv; /* [sct] Variable GTT object */
in_id=in_id_arr[omp_get_thread_num()];
var_prc[idx]->nc_id=in_id;
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Obtain variable GTT object using full variable name */
var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl);
/* Retrieve variable from disk into memory */
(void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl);
/* If re-ordering */
if(IS_REORDER){
if((var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type))) == NULL){
(void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm);
nco_exit(EXIT_FAILURE);
} /* endif err */
/* Change dimensionionality of values */
(void)nco_var_dmn_rdr_val_trv(var_prc[idx],var_prc_out[idx],trv_tbl);
/* Re-ordering required two value buffers, time to free() input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* IS_REORDER */
/* Edit group name for output */
if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll);
/* Obtain output group ID */
(void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id);
/* Memory management after current extracted group */
if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll);
/* Get variable ID */
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id);
/* Store the output variable ID */
var_prc_out[idx]->id=var_out_id;
if(nco_pck_plc != nco_pck_plc_nil){
/* Copy input variable buffer to processed variable buffer */
/* fxm: this is dangerous and leads to double free()'ing variable buffer */
var_prc_out[idx]->val=var_prc[idx]->val;
/* (Un-)Pack variable according to packing specification */
nco_pck_val(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc,aed_lst_add_fst+idx,aed_lst_scl_fct+idx);
} /* endif nco_pck_plc != nco_pck_plc_nil */
if(var_trv->ppc != NC_MAX_INT){
if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val);
} /* endif ppc */
if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp);
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{ /* begin OpenMP critical */
/* Copy variable to output file then free value buffer */
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is array */
} /* end OpenMP critical */
/* Free current output buffer */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end (OpenMP parallel for) loop over idx */
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\n");
/* Write/overwrite packing attributes for newly packed and re-packed variables
Logic here should nearly mimic logic in nco_var_dfn() */
if(nco_pck_plc != nco_pck_plc_nil && nco_pck_plc != nco_pck_plc_upk){
/* ...put file in define mode to allow metadata writing... */
(void)nco_redef(out_id);
/* ...loop through all variables that may have been packed... */
for(idx=0;idx<nbr_var_prc;idx++){
char *grp_out_fll=NULL; /* [sng] Group name */
int grp_out_id; /* [ID] Group ID (output) */
int var_out_id; /* [ID] Variable ID (output) */
trv_sct *var_trv; /* [sct] Variable GTT object */
/* Obtain variable GTT object using full variable name */
var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl);
/* Edit group name for output */
if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll);
/* Obtain output group ID */
(void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id);
/* Memory management after current extracted group */
if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll);
/* Get variable ID */
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id);
/* nco_var_dfn() pre-defined dummy packing attributes in output file only for "packable" input variables */
if(nco_pck_plc_typ_get(nco_pck_map,var_prc[idx]->typ_upk,(nc_type *)NULL)){
/* Verify input variable was newly packed by this operator
Writing pre-existing (non-re-packed) attributes here would fail because
nco_pck_dsk_inq() never fills in var->scl_fct.vp and var->add_fst.vp
Logic is same as in nco_var_dfn() (except var_prc[] instead of var[])
If operator newly packed this particular variable... */
if(
/* ...either because operator newly packs all variables... */
(nco_pck_plc == nco_pck_plc_all_new_att && nco_pck_map != nco_pck_map_dbl_flt && nco_pck_map != nco_pck_map_flt_dbl) ||
/* ...or because operator newly packs un-packed variables like this one... */
(nco_pck_plc == nco_pck_plc_all_xst_att && !var_prc[idx]->pck_ram) ||
/* ...or because operator re-packs packed variables like this one... */
(nco_pck_plc == nco_pck_plc_xst_new_att && var_prc[idx]->pck_ram)
){
/* Replace dummy packing attributes with final values, or delete them */
if(nco_dbg_lvl >= nco_dbg_io) (void)fprintf(stderr,"%s: main() replacing dummy packing attribute values for variable %s\n",nco_prg_nm,var_prc[idx]->nm);
(void)nco_aed_prc(grp_out_id,aed_lst_add_fst[idx].id,aed_lst_add_fst[idx]);
(void)nco_aed_prc(grp_out_id,aed_lst_scl_fct[idx].id,aed_lst_scl_fct[idx]);
} /* endif variable is newly packed by this operator */
} /* !nco_pck_plc_alw */
} /* end loop over var_prc */
/* Take output file out of define mode */
if(hdr_pad == 0UL) (void)nco_enddef(out_id); else (void)nco__enddef(out_id,hdr_pad);
} /* nco_pck_plc == nco_pck_plc_nil || nco_pck_plc == nco_pck_plc_upk */
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
} /* end loop over fl_idx */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncpdq-specific memory cleanup */
if(dmn_rdr_nbr > 0){
if(dmn_rdr_nbr_in > 0) dmn_rdr_lst_in=nco_sng_lst_free(dmn_rdr_lst_in,dmn_rdr_nbr_in);
dmn_rvr_rdr=(nco_bool *)nco_free(dmn_rvr_rdr);
/* Free dimension list pointers */
for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_trv; idx_rdr++){
dmn_rdr_trv[idx_rdr]->nm=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm);
dmn_rdr_trv[idx_rdr]->nm_fll=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm_fll);
dmn_rdr_trv[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr_trv[idx_rdr]);
}
dmn_rdr_trv=(dmn_sct **)nco_free(dmn_rdr_trv);
/* Dimension structures in dmn_rdr are owned by dmn and dmn_out, free'd later */
} /* endif dmn_rdr_nbr > 0 */
if(nco_pck_plc != nco_pck_plc_nil){
if(nco_pck_plc_sng) nco_pck_plc_sng=(char *)nco_free(nco_pck_plc_sng);
if(nco_pck_map_sng) nco_pck_map_sng=(char *)nco_free(nco_pck_map_sng);
if(nco_pck_plc != nco_pck_plc_upk){
/* No need for loop over var_prc variables to free attribute values
Variable structures and attribute edit lists share same attribute values
Free them only once, and do it in nco_var_free() */
aed_lst_add_fst=(aed_sct *)nco_free(aed_lst_add_fst);
aed_lst_scl_fct=(aed_sct *)nco_free(aed_lst_scl_fct);
} /* nco_pck_plc == nco_pck_plc_upk */
} /* nco_pck_plc == nco_pck_plc_nil */
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]);
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr);
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
trv_tbl_free(trv_tbl);
for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm);
if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne);
if(gpe) gpe=(gpe_sct *)nco_gpe_free(gpe);
} /* !flg_mmr_cln */
#ifdef ENABLE_MPI
MPI_Finalize();
#endif /* !ENABLE_MPI */
/* End timer */
ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
if(rcd != NC_NOERR) nco_err_exit(rcd,"main");
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 24;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(1,ceild(8*t2-Nz+9,4)),t1+1);t3<=min(floord(4*Nt+Ny-9,4),floord(4*t1+Ny-1,4));t3++) {
for (t4=max(max(ceild(t1-14,16),ceild(8*t2-Nz-51,64)),ceild(4*t3-Ny-51,64));t4<=min(min(floord(4*Nt+Nx-9,64),floord(4*t1+Nx-1,64)),floord(4*t3+Nx-9,64));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),t3-1),16*t4+14);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_binop__ne_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_uint64
// A.*B function (eWiseMult): GB_AemultB__ne_uint64
// A*D function (colscale): GB_AxD__ne_uint64
// D*A function (rowscale): GB_DxB__ne_uint64
// C+=B function (dense accum): GB_Cdense_accumB__ne_uint64
// C+=b function (dense accum): GB_Cdense_accumb__ne_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_uint64
// C=scalar+B GB_bind1st__ne_uint64
// C=scalar+B' GB_bind1st_tran__ne_uint64
// C=A+scalar GB_bind2nd__ne_uint64
// C=A'+scalar GB_bind2nd_tran__ne_uint64
// C type: bool
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT64 || GxB_NO_NE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ne_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ne_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ne_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ne_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ne_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ne_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ne_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ne_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ne_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB_bind1st_tran__ne_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB_bind2nd_tran__ne_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON does not have ordered compare builtin
Need to compare a eq a and b eq b to check for NaN
Do AND of results to get final */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if defined(SIMDE_BUG_CLANG_44589)
# define ssimde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(maxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ETNA
#define _MM_HINT_ETNA SIMDE_MM_HINT_ETNA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET1
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shuffle_ps(a, b, imm8) \
__extension__({ \
float32x4_t ret; \
ret = vmovq_n_f32( \
vgetq_lane_f32(a, (imm8) & (0x3))); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \
ret, 1); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \
ret, 2); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \
ret, 3); \
})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, tmp_.f32);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright @ 2010 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Prehaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
size_t f,l;
for(f=1, l=2; l <= n; f=f*l, l++);
return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel=kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel defintion strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernal method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MagickPathExtent];
const char
*p,
*end;
ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists thorugh rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
(void) GetNextToken(p,&p,MagickPathExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling a incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we recieved at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string,
ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
(void) GetNextToken(kernel_string,&p,MagickPathExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel defintion */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
ExceptionInfo *exception)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MagickPathExtent];
const char
*p;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
/* tokens starting with alpha is a Named kernel */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p,exception);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usally much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimentional Pascel's Triangle
% of values. Used for special forma of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0,-2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neigher paper defineds the kernels in a way that looks locical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One why
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
ssize_t
i;
ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidOption","`%s'","Should not call this function");
return((KernelInfo *) NULL);
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that its
% can be modified without effecting the original. The cloned kernel should
% be destroyed using DestoryKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
if (new_kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if (kernel->next != (KernelInfo *) NULL)
kernel->next=DestroyKernelInfo(kernel->next);
kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotatation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internel to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
ssize_t
x,r;
double
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
kernel->x = kernel->width - kernel->x - 1;
angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
last = kernel;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only works for 3x3 kernels.
% While 90 degree roatations only works for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internel to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
/* Test for Nan equivalence */
if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
return MagickFalse;
if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
KernelInfo
*clone_info,
*last;
clone_info=(KernelInfo *) NULL;
last=kernel;
DisableMSCWarning(4127)
while (1) {
RestoreMSCWarning
clone_info=CloneKernelInfo(last);
if (clone_info == (KernelInfo *) NULL)
break;
RotateKernelInfo(clone_info,angle);
if (SameKernelInfo(kernel,clone_info) != MagickFalse)
break;
LastKernelInfo(last)->next=clone_info;
last=clone_info;
}
if (clone_info != (KernelInfo *) NULL)
clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c M e t a K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only ne used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but without
% any user controls. This allows internel programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriatally before being parred to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
const MorphologyMethod method,const KernelInfo *kernel,const double bias,
ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*image_view,
*morphology_view;
OffsetInfo
offset;
ssize_t
j,
y;
size_t
*changes,
changed,
width;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(morphology_image != (Image *) NULL);
assert(morphology_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
width=image->columns+kernel->width-1;
offset.x=0;
offset.y=0;
switch (method)
{
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
{
/*
Kernel needs to used with reflection about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
default:
{
ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidOption","`%s'","Not a Primitive Morphology Method");
break;
}
}
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changes[j]=0;
if ((method == ConvolveMorphology) && (kernel->width == 1))
{
ssize_t
x;
/*
Special handling (for speed) of vertical (blur) kernels. This performs
its handling in columns rather than in rows. This is only done
for convolve as it is the only method that generates very large 1-D
vertical kernels (such as a 'BlurKernel')
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,morphology_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
r;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
kernel->height-1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
morphology_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*offset.y;
for (r=0; r < (ssize_t) image->rows; r++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
const MagickRealType
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
v;
size_t
count;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if ((traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
k=(&kernel->values[kernel->height-1]);
pixels=p;
pixel=bias;
gamma=1.0;
count=0;
if (((image->alpha_trait & BlendPixelTrait) == 0) ||
((morphology_traits & BlendPixelTrait) == 0))
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
else
{
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_image->type=image->type;
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : 0);
}
/*
Normal handling of horizontal or rectangular kernels (row by row).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,morphology_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
kernel->height,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
GetPixelChannels(image)*offset.x);
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
intensity,
maximum,
minimum,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
const MagickRealType
*magick_restrict k;
const Quantum
*magick_restrict pixels,
*magick_restrict quantum_pixels;
ssize_t
u;
size_t
count;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if ((traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
pixels=p;
quantum_pixels=(const Quantum *) NULL;
maximum=0.0;
minimum=(double) QuantumRange;
switch (method)
{
case ConvolveMorphology:
{
pixel=bias;
break;
}
case DilateMorphology:
case ErodeIntensityMorphology:
{
pixel=0.0;
break;
}
case HitAndMissMorphology:
case ErodeMorphology:
{
pixel=QuantumRange;
break;
}
default:
{
pixel=(double) p[center+i];
break;
}
}
count=0;
gamma=1.0;
switch (method)
{
case ConvolveMorphology:
{
/*
Weighted Average of pixels using reflected kernel
For correct working of this operation for asymetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values needs to be reversed.
Correlation is actually the same as this but without reflecting
the kernel, and thus 'lower-level' that Convolution. However as
Convolution is the more common method used, and it does not
really cost us much in terms of processing to use a reflected
kernel, so it is Convolution that is implemented.
Correlation will have its kernel reflected before calling this
function to do a Convolve.
For more details of Correlation vs Convolution see
http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
if (((image->alpha_trait & BlendPixelTrait) == 0) ||
((morphology_traits & BlendPixelTrait) == 0))
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
/*
Alpha blending.
*/
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case ErodeMorphology:
{
/*
Minimum value within kernel neighbourhood.
The kernel is not reflected for this operation. In normal
Greyscale Morphology, the kernel value should be added
to the real value, this is currently not done, due to the
nature of the boolean kernels being used.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateMorphology:
{
/*
Maximum value within kernel neighbourhood.
For correct working of this operation for asymetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values needs to be reversed.
In normal Greyscale Morphology, the kernel value should be
added to the real value, this is currently not done, due to the
nature of the boolean kernels being used.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k > 0.5))
{
if ((double) pixels[i] > pixel)
pixel=(double) pixels[i];
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
/*
Minimum of foreground pixel minus maxumum of background pixels.
The kernel is not reflected for this operation, and consists
of both foreground and background pixel neighbourhoods, 0.0 for
background, and 1.0 for foreground with either Nan or 0.5 values
for don't care.
This never produces a meaningless negative result. Such results
cause Thinning/Thicken to not work correctly when used against a
greyscale image.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if (*k > 0.7)
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
else
if (*k < 0.3)
{
if ((double) pixels[i] > maximum)
maximum=(double) pixels[i];
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
pixel-=maximum;
if (pixel < 0.0)
pixel=0.0;
if (method == ThinningMorphology)
pixel=(double) p[center+i]-pixel;
else
if (method == ThickenMorphology)
pixel+=(double) p[center+i]+pixel;
break;
}
case ErodeIntensityMorphology:
{
/*
Select pixel with minimum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity < minimum)
{
quantum_pixels=pixels;
pixel=(double) pixels[i];
minimum=intensity;
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateIntensityMorphology:
{
/*
Select pixel with maximum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity > maximum)
{
pixel=(double) pixels[i];
quantum_pixels=pixels;
maximum=intensity;
}
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case IterativeDistanceMorphology:
{
/*
Compute th iterative distance from black edge of a white image
shape. Essentially white values are decreased to the smallest
'distance from edge' it can find.
It works by adding kernel values to the neighbourhood, and
select the minimum value found. The kernel is rotated before
use, so kernel distances match resulting distances, when a user
provided asymmetric kernel is applied.
This code is nearly identical to True GrayScale Morphology but
not quite.
GreyDilate Kernel values added, maximum value found Kernel is
rotated before use.
GrayErode: Kernel values subtracted and minimum value found No
kernel rotation used.
Note the Iterative Distance method is essentially a
GrayErode, but with negative kernel values, and kernel rotation
applied.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case UndefinedMorphology:
default:
break;
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
if (quantum_pixels != (const Quantum *) NULL)
{
SetPixelChannel(morphology_image,channel,quantum_pixels[i],q);
continue;
}
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : -1);
}
/*
This is almost identical to the MorphologyPrimative() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is after each row is 'Sync'ed' into the image, the next row makes use of
those values as part of the calculation of the next row. It repeats, but
going in the oppisite (bottom-up) direction.
Because of this 're-use of results' this function can not make use of multi-
threaded, parellel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method,const KernelInfo *kernel,
ExceptionInfo *exception)
{
CacheView
*morphology_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
size_t
width,
changed;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
changed=0;
progress=0;
switch(method)
{
case DistanceMorphology:
case VoronoiMorphology:
{
/*
Kernel reflected about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
default:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
}
/*
Two views into same image, do not thread.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image! We read
using virtual to get virtual pixel handling, but write back into the same
image.
Only top half of kernel is processed as we do a single pass downward
through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
offset.y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
const MagickRealType
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v <= offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
/*
Do the reverse pass through the image.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image. We
read using virtual to get virtual pixel handling, but write back
into the same image.
Only the bottom half of the kernel is processed as we up the image.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
kernel->y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
p+=(image->columns-1)*GetPixelChannels(image);
q+=(image->columns-1)*GetPixelChannels(image);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
const MagickRealType
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
is based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,const double bias,
ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MagickPathExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative interations = infinite (well alomst) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsStringTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iteratations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THUR */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primative */
break;
default:
break;
}
/* Apply special methods with special requirments
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);
if (verbose != MagickFalse)
(void) (void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned it off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
(void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morpholgy methods
** erose, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primative to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dialate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem stange to convert a Correlation into a
** Convolution as the Correlation is the simplier method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if (verbose != MagickFalse) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
this_kernel, bias, exception);
if (verbose != MagickFalse) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) (void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if (verbose != MagickFalse && kernel_changed != (size_t)changed)
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if (verbose != MagickFalse && stage_loop < stage_limit)
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primative (staging) Loop for Coumpound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
break;
case EdgeMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if (verbose != MagickFalse) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
0,0,exception);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method interation */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given mophology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
const MorphologyMethod method,const ssize_t iterations,
const KernelInfo *kernel,ExceptionInfo *exception)
{
const char
*artifact;
CompositeOperator
compose;
double
bias;
Image
*morphology_image;
KernelInfo
*curr_kernel;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
curr_kernel = (KernelInfo *) kernel;
bias=0.0;
compose = UndefinedCompositeOp; /* use default for method */
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
/* Get the bias value as it will be needed */
artifact = GetImageArtifact(image,"convolve:bias");
if ( artifact != (const char *) NULL) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:bias",artifact);
else
bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
}
/* Scale kernel according to user wishes */
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *) NULL ) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:scale",artifact);
else {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
}
/* display the (normalized) kernel via stderr */
artifact=GetImageArtifact(image,"morphology:showKernel");
if (IsStringTrue(artifact) != MagickFalse)
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{
ssize_t
parse;
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL) {
parse=ParseCommandOption(MagickComposeOptions,
MagickFalse,artifact);
if ( parse < 0 )
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
"morphology:compose",artifact);
else
compose=(CompositeOperator)parse;
}
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image,method,iterations,
curr_kernel,compose,bias,exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore usless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* angle the lower kernels first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allows a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees -- 3x3 kernels only */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
double t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1 dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
{ ssize_t
i,j,x,y;
MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also know as a reflection
* This is actually a very very common operation!
* Basically all that is needed is a reversal of the kernel data!
* And a reflection of the origon
*/
MagickRealType
t;
MagickRealType
*k;
ssize_t
i,
j;
k=kernel->values;
j=(ssize_t) (kernel->width*kernel->height-1);
for (i=0; i < j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point angle should at least between -45 (315) and +45 degrees
* In the future some form of non-orthogonal angled rotates could be
* performed here, posibily with a linear kernel restriction.
*/
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
MagickStatusType
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplician' kernels) kernel
% will be scaled by just the sum of the postive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
double
pos_scale,
neg_scale;
ssize_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
/* finialize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if (!IsNaN(kernel->values[i]))
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
kernel->minimum = 1;
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel defination to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) >= MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if (IsNaN(k->values[i]))
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), (double) k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAdditionKernelInfo method is:
%
% void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
size_t
i;
/* do the other kernels in a multi-kernel list first */
if (kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if (IsNaN(kernel->values[i]))
kernel->values[i]=0.0;
return;
}
|
1.for.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#define N 16
/* Q1: How many iterations from the first loop are executed by each thread */
/* Q2: How many iterations from the second loop are executed by */
/* each thread */
/* Q3: Add a directive so that the first "printf" is executed only once */
/* by the first thread that finds it. */
int main()
{
int i;
omp_set_num_threads(8);
#pragma omp parallel
{
#pragma omp single
printf("Going to distribute iterations in first loop ...\n");
#pragma omp for
for (i=0; i < N; i++) {
int id=omp_get_thread_num();
printf("(%d) gets iteration %d\n",id,i);
}
}
printf("\nGoing to distribute iterations in second loop ...\n");
#pragma omp parallel for
for (i=0; i < N+3; i++) {
int id=omp_get_thread_num();
printf("(%d) gets iteration %d\n",id,i);
}
return 0;
}
|
ocp_nlp_common.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"
// openmp
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
/************************************************
* config
************************************************/
int ocp_nlp_config_calculate_size(int N)
{
int ii;
int size = 0;
// self
size += sizeof(ocp_nlp_config);
// qp solver
size += 1 * ocp_qp_xcond_solver_config_calculate_size();
// regularization
size += ocp_nlp_reg_config_calculate_size();
// dynamics
size += N * sizeof(ocp_nlp_dynamics_config *);
for (ii = 0; ii < N; ii++) size += ocp_nlp_dynamics_config_calculate_size();
// cost
size += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (ii = 0; ii <= N; ii++) size += ocp_nlp_cost_config_calculate_size();
// constraints
size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (ii = 0; ii <= N; ii++) size += ocp_nlp_constraints_config_calculate_size();
return size;
}
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
int ii;
char *c_ptr = (char *) raw_memory;
ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
c_ptr += sizeof(ocp_nlp_config);
config->N = N;
// qp solver
config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr);
c_ptr += ocp_qp_xcond_solver_config_calculate_size();
// regularization
config->regularize = ocp_nlp_reg_config_assign(c_ptr);
c_ptr += ocp_nlp_reg_config_calculate_size();
// dynamics
config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii] = ocp_nlp_dynamics_config_assign(c_ptr);
c_ptr += ocp_nlp_dynamics_config_calculate_size();
}
// cost
config->cost = (ocp_nlp_cost_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (ii = 0; ii <= N; ii++)
{
config->cost[ii] = ocp_nlp_cost_config_assign(c_ptr);
c_ptr += ocp_nlp_cost_config_calculate_size();
}
// constraints
config->constraints = (ocp_nlp_constraints_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii] = ocp_nlp_constraints_config_assign(c_ptr);
c_ptr += ocp_nlp_constraints_config_calculate_size();
}
return config;
}
/************************************************
* dims
************************************************/
static int ocp_nlp_dims_calculate_size_self(int N)
{
int size = 0;
size += sizeof(ocp_nlp_dims);
// nlp sizes
size += 6 * (N + 1) * sizeof(int); // nv, nx, nu, ni, nz, ns
// dynamics
size += N * sizeof(void *);
// cost
size += (N + 1) * sizeof(void *);
// constraints
size += (N + 1) * sizeof(void *);
// regularization
size += ocp_nlp_reg_dims_calculate_size(N);
size += sizeof(ocp_nlp_reg_dims);
size += 8; // initial align
return size;
}
int ocp_nlp_dims_calculate_size(void *config_)
{
ocp_nlp_config *config = config_;
int N = config->N;
int ii;
int size = 0;
// self
size += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
size += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
// cost
for (ii = 0; ii <= N; ii++) size += config->cost[ii]->dims_calculate_size(config->cost[ii]);
// constraints
for (ii = 0; ii <= N; ii++)
size += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
// qp solver
size += config->qp_solver->dims_calculate_size(config->qp_solver, N);
return size;
}
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
int ii;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
c_ptr += sizeof(ocp_nlp_dims);
// nv
assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
// nx
assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
// nu
assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
// ni
assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
// nz
assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
// ns
assign_and_advance_int(N + 1, &dims->ns, &c_ptr);
// dynamics
dims->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
dims->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
dims->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// regularization
dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
c_ptr += ocp_nlp_reg_dims_calculate_size(N);
/* initialize qp_solver dimensions */
// dims->qp_solver->N = N;
// for (ii = 0; ii <= N; ii++)
// {
// TODO(dimitris): values below are needed for reformulation of QP when soft constraints
// are not supported. Make this a bit more transparent as it clushes with nbx/nbu above.
// dims->qp_solver->nsbx[ii] = 0;
// dims->qp_solver->nsbu[ii] = 0;
// dims->qp_solver->nsg[ii] = 0;
// }
// N
dims->N = N;
// initialize dimensions to zero by default
// nv
for(ii=0; ii<=N; ii++)
dims->nv[ii] = 0;
// nx
for(ii=0; ii<=N; ii++)
dims->nx[ii] = 0;
// nu
for(ii=0; ii<=N; ii++)
dims->nu[ii] = 0;
// ni
for(ii=0; ii<=N; ii++)
dims->ni[ii] = 0;
// nz
for(ii=0; ii<=N; ii++)
dims->nz[ii] = 0;
// ns
for(ii=0; ii<=N; ii++)
dims->ns[ii] = 0;
// TODO initialize dims to zero by default also in modules !!!!!!!
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);
return dims;
}
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
ocp_nlp_config *config = config_;
int N = config->N;
int ii;
char *c_ptr = (char *) raw_memory;
// self
ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
c_ptr += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
dims->dynamics[ii] = config->dynamics[ii]->dims_assign(config->dynamics[ii], c_ptr);
c_ptr += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
dims->cost[ii] = config->cost[ii]->dims_assign(config->cost[ii], c_ptr);
c_ptr += config->cost[ii]->dims_calculate_size(config->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
dims->constraints[ii] =
config->constraints[ii]->dims_assign(config->constraints[ii], c_ptr);
c_ptr += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
}
// qp solver
dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);
return dims;
}
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field,
const void* value_array)
{
// to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int ii;
int N = config->N;
int *int_array = (int *) value_array;
/* set ocp_nlp dimension */
if (!strcmp(field, "nx"))
{
// opt var
for (ii = 0; ii <= N; ii++)
{
// set nx
dims->nx[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nx", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nx", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]);
}
// regularization
for (ii = 0; ii <= N; ii++)
{
config->regularize->dims_set(config->regularize, dims->regularize, ii, "nx", &int_array[ii]);
}
}
else if (!strcmp(field, "nu"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set nu
dims->nu[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nu", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nu", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]);
}
// regularization
for (ii = 0; ii <= N; ii++)
{
config->regularize->dims_set(config->regularize, dims->regularize, ii, "nu", &int_array[ii]);
}
}
else if (!strcmp(field, "nz"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set nz
dims->nz[ii] = int_array[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nz", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nz", &int_array[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nz", &int_array[i]);
}
}
else if (!strcmp(field, "ns"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set ns
dims->ns[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "ns", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns",
&int_array[i]);
}
}
else
{
printf("error: dims type not available in module ocp_nlp: %s", field);
exit(1);
}
#if 0
/* set ocp_nlp submodule dimensions */
if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], field, &int_array[i]);
}
}
if (!strcmp(field, "nu"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
}
if (!strcmp(field, "nx"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
}
for (int i = 0; i <= N; i++) // cost
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], field, &int_array[i]);
}
for (int i = 0; i <= N; i++) // constraints
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, &int_array[i]);
}
if (strcmp(field, "nz")) // qp_solver does not contain nz
{
for (int i = 0; i <= N; i++) // qp_solver
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field,
&int_array[i]);
}
}
#endif
return;
}
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field,
const void* value_)
{
// to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
int i = stage;
// set in constraint module
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, int_value);
// update ni in ocp_nlp dimensions
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ni", &dims->ni[i]);
// update qp_solver dims
if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value);
}
else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ng_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver);
}
else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
{
// update ng_qp_solver in qp_solver
int nsg_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver);
}
else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"nge_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver);
}
return;
}
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage,
const char *field, const void* value_)
{
// to set dimension ny (output)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage,
const char *field, const void* value)
{
// mainly for gnsf dimensions
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value;
config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value);
}
/************************************************
* in
************************************************/
int ocp_nlp_in_calculate_size_self(int N)
{
int size = sizeof(ocp_nlp_in);
size += N * sizeof(double); // Ts
size += N * sizeof(void *); // dynamics
size += (N + 1) * sizeof(void *); // cost
size += (N + 1) * sizeof(void *); // constraints
return size;
}
int ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
int ii;
int N = dims->N;
int size = ocp_nlp_in_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
size +=
config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += config->constraints[ii]->model_calculate_size(config->constraints[ii],
dims->constraints[ii]);
}
size += 8; // initial align
size += 8; // final align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
c_ptr += sizeof(ocp_nlp_in);
// Ts
assign_and_advance_double(N, &in->Ts, &c_ptr);
// dynamics
in->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
in->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
in->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
align_char_to(8, &c_ptr);
return in;
}
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
int ii;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
// struct
ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
c_ptr += ocp_nlp_in_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
in->dynamics[ii] =
config->dynamics[ii]->model_assign(config->dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr +=
config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
in->cost[ii] = config->cost[ii]->model_assign(config->cost[ii], dims->cost[ii], c_ptr);
c_ptr += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
in->constraints[ii] = config->constraints[ii]->model_assign(config->constraints[ii],
dims->constraints[ii], c_ptr);
c_ptr += config->constraints[ii]->model_calculate_size(config->constraints[ii],
dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);
return in;
}
/************************************************
* out
************************************************/
int ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
int size = sizeof(ocp_nlp_out);
size += 4 * (N + 1) * sizeof(struct blasfeo_dvec); // ux, lam, t, z
size += 1 * N * sizeof(struct blasfeo_dvec); // pi
for (int ii = 0; ii < N; ii++)
{
size += 1 * blasfeo_memsize_dvec(nv[ii]); // ux
size += 1 * blasfeo_memsize_dvec(nz[ii]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // lam, t
size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // pi
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // ux
size += 1 * blasfeo_memsize_dvec(nz[N]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // lam, t
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
// loop index
int ii;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
c_ptr += sizeof(ocp_nlp_out);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// blasfeo_dvec_struct
// ux
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
// z
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
// pi
assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
// lam
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
// t
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// blasfeo_dvec
// ux
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], out->ux + ii, &c_ptr);
}
// z
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nz[ii], out->z + ii, &c_ptr);
}
// pi
for (int ii = 0; ii < N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], out->pi + ii, &c_ptr);
}
// lam
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->lam + ii, &c_ptr);
}
// t
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->t + ii, &c_ptr);
}
// zero solution
for(ii=0; ii<N; ii++)
{
blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
blasfeo_dvecse(nx[ii+1], 0.0, out->pi+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
}
ii = N;
blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);
return out;
}
/************************************************
* options
************************************************/
int ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
int size = 0;
size += sizeof(ocp_nlp_opts);
size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
size += config->regularize->opts_calculate_size();
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
size += 2*8; // 2 aligns
return size;
}
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
align_char_to(8, &c_ptr);
ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_opts);
/* pointers to substructures */
opts->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
opts->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
opts->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
align_char_to(8, &c_ptr);
/* substructures */
opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
opts->regularize = config->regularize->opts_assign(c_ptr);
c_ptr += config->regularize->opts_calculate_size();
// dynamics
for (int ii = 0; ii < N; ii++)
{
opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
opts->constraints[ii] =
constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
ocp_nlp_reg_config *regularize = config->regularize;
int ii;
int N = dims->N;
opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
#if defined(ACADOS_NUM_THREADS)
opts->num_threads = ACADOS_NUM_THREADS;
// printf("\nocp_nlp: openmp threads from macro = %d\n", opts->num_threads);
#else
opts->num_threads = omp_get_max_threads();
// printf("\nocp_nlp: omp_get_max_threads %d", omp_get_max_threads());
#endif
#endif
// printf("\nocp_nlp: openmp threads = %d\n", opts->num_threads);
opts->globalization = FIXED_STEP;
opts->step_length = 1.0;
opts->levenberg_marquardt = 0.0;
/* submodules opts */
// qp solver
qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name, i.e. substring in field before '_'
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
field+module_length+1, value);
}
else // nlp opts
{
if (!strcmp(field, "reuse_workspace"))
{
int* reuse_workspace = (int *) value;
opts->reuse_workspace = *reuse_workspace;
}
else if (!strcmp(field, "num_threads"))
{
int* num_threads = (int *) value;
opts->num_threads = *num_threads;
}
else if (!strcmp(field, "step_length"))
{
double* step_length = (double *) value;
opts->step_length = *step_length;
}
else if (!strcmp(field, "globalization"))
{
char* globalization = (char *) value;
if (!strcmp(globalization, "fixed_step"))
{
opts->globalization = FIXED_STEP;
}
else if (!strcmp(globalization, "merit_backtracking"))
{
opts->globalization = MERIT_BACKTRACKING;
}
else
{
printf("\nerror: ocp_nlp_opts_set: not supported value for globalization, got: %s\n",
globalization);
exit(1);
}
}
else if (!strcmp(field, "levenberg_marquardt"))
{
double* levenberg_marquardt = (double *) value;
opts->levenberg_marquardt = *levenberg_marquardt;
}
else if (!strcmp(field, "exact_hess"))
{
int N = config->N;
// cost
for (ii=0; ii<=N; ii++)
config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
// dynamics
for (ii=0; ii<N; ii++)
config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii],
"compute_hess", value);
// constraints
for (ii=0; ii<=N; ii++)
config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii],
"compute_hess", value);
}
// selectively turn on exact hessian contributions
else if (!strcmp(field, "exact_hess_cost"))
{
int N = config->N;
for (ii=0; ii<=N; ii++)
config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
}
else if (!strcmp(field, "exact_hess_dyn"))
{
int N = config->N;
for (ii=0; ii<N; ii++)
config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii],
"compute_hess", value);
}
else if (!strcmp(field, "exact_hess_constr"))
{
int N = config->N;
for (ii=0; ii<=N; ii++)
config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii],
"compute_hess", value);
}
else
{
printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field);
exit(1);
}
}
return;
}
void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to dynamics module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) )
{
config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage],
field+module_length+1, value );
}
// pass options to cost module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) )
{
config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage],
field+module_length+1, value);
}
// pass options to constraint module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) )
{
config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage],
(char *) field+module_length+1, value);
}
else
{
printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
exit(1);
}
return;
}
/************************************************
* memory
************************************************/
int ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
int size = sizeof(ocp_nlp_memory);
// qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// qp solver
size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
// nlp res
size += ocp_nlp_res_calculate_size(dims);
size += (N+1)*sizeof(bool); // set_sim_guess
size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt
size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun
for (int ii = 0; ii < N; ii++)
{
size += 1*blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[ii]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[ii]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[ii] + nx[ii]); // dyn_adj
size += 1*blasfeo_memsize_dvec(nx[ii + 1]); // dyn_fun
size += 1*blasfeo_memsize_dvec(2 * ni[ii]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[ii] + nz[ii]); // sim_guess
}
size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj
size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess
size += 8; // initial align
size += 8; // middle align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_memory);
/* pointers to substructures */
// dynamics
mem->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
// cost
mem->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// constraints
mem->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// middle align
align_char_to(8, &c_ptr);
/* substructures */
// qp in
mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// QP solver
mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize,
opts->regularize, c_ptr);
c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize,
opts->regularize);
// dynamics
for (int ii = 0; ii < N; ii++)
{
mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
mem->constraints[ii] = constraints[ii]->memory_assign(constraints[ii],
dims->constraints[ii], opts->constraints[ii], c_ptr);
c_ptr += constraints[ii]->memory_calculate_size( constraints[ii], dims->constraints[ii],
opts->constraints[ii]);
}
// nlp res
mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
c_ptr += mem->nlp_res->memsize;
// blasfeo_struct align
align_char_to(8, &c_ptr);
// dzduxt
assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr);
// z_alg
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr);
// cost_grad
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
// ineq_fun
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
// ineq_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
// dyn_fun
assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
// dyn_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
// sim_guess
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);
// set_sim_guess
assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);
for (int ii = 0; ii <= N; ++ii)
{
mem->set_sim_guess[ii] = false;
}
// blasfeo_mem align
align_char_to(64, &c_ptr);
// dzduxt
for (int ii=0; ii<=N; ii++)
{
assign_and_advance_blasfeo_dmat_mem(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, &c_ptr);
}
// z_alg
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
c_ptr += blasfeo_memsize_dvec(nz[ii]);
}
// cost_grad
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->cost_grad + ii, &c_ptr);
}
// ineq_fun
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], mem->ineq_fun + ii, &c_ptr);
}
// ineq_adj
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->ineq_adj + ii, &c_ptr);
}
// dyn_fun
for (int ii = 0; ii < N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], mem->dyn_fun + ii, &c_ptr);
}
// dyn_adj
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nu[ii] + nx[ii], mem->dyn_adj + ii, &c_ptr);
}
// sim_guess
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii] + nz[ii], mem->sim_guess + ii, &c_ptr);
// set to 0;
blasfeo_dvecse(nx[ii] + nz[ii], 0.0, mem->sim_guess+ii, 0);
// printf("sim_guess ii %d: %p\n", ii, mem->sim_guess+ii);
}
// printf("created memory %p\n", mem);
return mem;
}
/************************************************
* workspace
************************************************/
int ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
int size = 0;
// nlp
size += sizeof(ocp_nlp_workspace);
// tmp_nlp_out
size += ocp_nlp_out_calculate_size(config, dims);
// weight_merit_fun
size += ocp_nlp_out_calculate_size(config, dims);
// array of pointers
// cost
size += (N+1)*sizeof(void *);
// dynamics
size += N*sizeof(void *);
// constraints
size += (N+1)*sizeof(void *);
// module workspace
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
#else
int size_tmp = 0;
int tmp;
// qp solver
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (ii = 0; ii < N; ii++)
{
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (ii = 0; ii <= N; ii++)
{
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (ii = 0; ii <= N; ii++)
{
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
size += size_tmp;
#endif
}
else
{
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
}
size += 8; // struct align
return size;
}
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
c_ptr += sizeof(ocp_nlp_workspace);
/* pointers to substructures */
//
work->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
//
work->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
//
work->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
align_char_to(8, &c_ptr);
/* substructures */
// tmp_nlp_out
work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// weight_merit_fun
work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
#else
int size_tmp = 0;
int tmp;
// qp solver
work->qp_work = (void *) c_ptr;
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
c_ptr += size_tmp;
#endif
}
else
{
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
}
assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return work;
}
/************************************************
* functions
************************************************/
void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int ii;
int N = dims->N;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (ii = 0; ii <= N; ii++)
{
// cost
config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], in->cost[ii],
opts->cost[ii], mem->cost[ii], work->cost[ii]);
// dynamics
if (ii < N)
config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii],
in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]);
// constraints
config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii],
in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);
}
return;
}
void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int ii;
struct blasfeo_dvec *ineq_fun;
int N = dims->N;
int *ni = dims->ni;
int *ns = dims->ns;
int *nx = dims->nx;
int *nu = dims->nu;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (ii = 0; ii <= N; ii++)
{
// copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun
blasfeo_dveccp(nx[ii]+nu[ii]+2*ns[ii], out->ux+ii, 0, work->tmp_nlp_out->ux+ii, 0);
// evaluate inequalities
config->constraints[ii]->compute_fun(config->constraints[ii], dims->constraints[ii],
in->constraints[ii], opts->constraints[ii],
mem->constraints[ii], work->constraints[ii]);
ineq_fun = config->constraints[ii]->memory_get_fun_ptr(mem->constraints[ii]);
// t = -ineq_fun
blasfeo_dveccpsc(2 * ni[ii], -1.0, ineq_fun, 0, out->t + ii, 0);
}
return;
}
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem,
ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
/* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// init Hessian to 0
blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);
if (i < N)
{
// Levenberg Marquardt term: Ts[i] * levenberg_marquardt * eye()
if (opts->levenberg_marquardt > 0.0)
blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt,
mem->qp_in->RSQrq+i, 0, 0);
// dynamics
config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
}
else
{
// Levenberg Marquardt term: 1.0 * levenberg_marquardt * eye()
if (opts->levenberg_marquardt > 0.0)
blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt,
mem->qp_in->RSQrq+i, 0, 0);
}
// cost
config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
// constraints
config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
}
/* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i <= N; i++)
{
// nlp mem: cost_grad
struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);
// nlp mem: dyn_fun
if (i < N)
{
struct blasfeo_dvec *dyn_fun
= config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
}
// nlp mem: dyn_adj
if (i < N)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
}
else
{
blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
}
if (i > 0)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i],
mem->dyn_adj+i, nu[i]);
}
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);
// nlp mem: ineq_adj
struct blasfeo_dvec *ineq_adj =
config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
}
for (i = 0; i <= N; i++)
{
// TODO(rien) where should the update happen??? move to qp update ???
// TODO(all): fix and move where appropriate
// if (i<N)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme != NULL && opts->scheme->type != exact)
// {
// for (int_t j = 0; j < nx; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
// for (int_t j = 0; j < nu; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
// }
// }
}
return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config,
ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// g
blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);
// b
if (i < N)
blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);
// d
blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
}
return;
}
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int *ni = dims->ni;
// constraints
config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0],
in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);
// d
blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);
return;
}
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i, j;
int N = dims->N;
int *nx = dims->nx;
int *ni = dims->ni;
double merit_fun = 0.0;
// compute fun value
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<=N; i++)
{
// cost
config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i],
mem->cost[i], work->cost[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<N; i++)
{
// dynamics
config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i],
opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<=N; i++)
{
// constr
config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i],
mem->constraints[i], work->constraints[i]);
}
double *tmp_fun;
double tmp;
struct blasfeo_dvec *tmp_fun_vec;
double cost_fun = 0.0;
for(i=0; i<=N; i++)
{
tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
cost_fun += *tmp_fun;
}
double dyn_fun = 0.0;
for(i=0; i<N; i++)
{
tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
// printf("\nMerit: dyn will multiply tmp_fun, weights\n");
// blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0);
for(j=0; j<nx[i+1]; j++)
{
// printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
}
}
double constr_fun = 0.0;
for(i=0; i<=N; i++)
{
// printf("\ni %d\n", i);
tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
// blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0);
for(j=0; j<2*ni[i]; j++)
{
tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
tmp = tmp>0.0 ? fabs(tmp) : 0.0; // tmp = constraint violation
// printf("IN merit fun: ineq i %d, j %d tmp_fun%e, multiplier %e\n", i, j, BLASFEO_DVECEL(tmp_fun_vec, j), BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp;
}
}
merit_fun = cost_fun + dyn_fun + constr_fun;
// printf("\nMerit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun);
return merit_fun;
}
static double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *ni = dims->ni;
double alpha = opts->step_length;
double tmp0, tmp1;
int j;
#if 0 // Line Search Gianluca version
// current point
for (i = 0; i <= N; i++)
blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);
// linear update of algebraic variables using state and input sensitivity
// if (i < N)
// {
// blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
// }
// initialize weights
if(mem->sqp_iter[0]==0)
{
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->weight_merit_fun->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
}
// update weigths
for (i = 0; i < N; i++)
{
for(j=0; j<nx[i+1]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->pi+i, j)));
BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
for (i = 0; i <= N; i++)
{
for(j=0; j<2*ni[i]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->lam+i, j)));
BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
printf("\n\nmerit fun value\n");
double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
double alpha_min = 0.1;
for (j=0; j<10 & alpha>alpha_min; j++)
{
for (i = 0; i <= N; i++)
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
printf("\n%d tmp merit fun value\n", j);
double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
if(merit_fun1 < merit_fun0)
{
break;
}
else
{
alpha *= 0.7;
}
}
printf("\nalpha %f\n", alpha);
#endif
if (opts->globalization == MERIT_BACKTRACKING)
{
// Line search version Jonathan
// Following Leineweber1999
// copy out (current iterate) to work->tmp_nlp_out
for (i = 0; i <= N; i++)
blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);
// linear update of algebraic variables using state and input sensitivity
// if (i < N)
// {
// blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
// }
/* initialize (Leineweber1999 M5.1) */
if (mem->sqp_iter[0]==0)
{
// initialize weights
// equality merit weights = abs( eq multipliers )
for (i = 0; i < N; i++)
{
for (j=0; j<nx[i+1]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0;
}
}
// printf("merit fun: initialize weights lam\n");
for (i = 0; i <= N; i++)
{
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
// blasfeo_print_dvec(nx[i+1], work->weight_merit_fun->lam+i, 0);
}
}
else
{
// update weights
// printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]);
for (i = 0; i < N; i++)
{
for(j=0; j<nx[i+1]; j++)
{
// abs(lambda) (LW)
tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
// .5 * (abs(lambda) + sigma)
tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
for (i = 0; i <= N; i++)
{
for(j=0; j<2*ni[i]; j++)
{
// mu (LW)
tmp0 = BLASFEO_DVECEL(out->lam+i, j);
// .5 * (mu + tau)
tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
}
if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter?
{
double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
double alpha_min = 0.1;
// TODO(oj): add alpha_min and alpha_reduction factor [0.7] to options.
/* actual Line Search*/
alpha = 1.0;
// TODO: check out more advanced step search Leineweber1995
for (j=0; alpha>alpha_min; j++)
{
for (i = 0; i <= N; i++)
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
// printf("\ntmp merit fun value step search iter: %d", j);
double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
// TODO(oj): also check Armijo-type condition Leinweber1999 (2.35)
if (merit_fun1 < merit_fun0)
{
break;
}
else
{
alpha *= 0.7;
}
}
}
printf("\nalpha %f\n", alpha);
}
return alpha;
}
void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
// step length
double alpha = ocp_nlp_line_search(config, dims, in, out, opts, mem, work);
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// step in primal variables
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0);
// update dual variables
if (i < N)
{
blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
}
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0);
// update slack values
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);
// linear update of algebraic variables using state and input sensitivity
if (i < N)
{
blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0,
mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
}
}
return;
}
/************************************************
* residuals
************************************************/
int ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int size = sizeof(ocp_nlp_res);
size += 3 * (N + 1) * sizeof(struct blasfeo_dvec); // res_stat res_ineq res_comp
size += 1 * N * sizeof(struct blasfeo_dvec); // res_eq
for (int ii = 0; ii < N; ii++)
{
size += 1 * blasfeo_memsize_dvec(nv[ii]); // res_stat
size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // res_eq
size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // res_ineq res_comp
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // res_stat
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // res_ineq res_comp
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
c_ptr += sizeof(ocp_nlp_res);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// res_stat
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_stat, &c_ptr);
// res_eq
assign_and_advance_blasfeo_dvec_structs(N, &res->res_eq, &c_ptr);
// res_ineq
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_ineq, &c_ptr);
// res_comp
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_comp, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// res_stat
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], res->res_stat + ii, &c_ptr);
}
// res_eq
for (int ii = 0; ii < N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], res->res_eq + ii, &c_ptr);
}
// res_ineq
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_ineq + ii, &c_ptr);
}
// res_comp
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_comp + ii, &c_ptr);
}
res->memsize = ocp_nlp_res_calculate_size(dims);
return res;
}
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res,
ocp_nlp_memory *mem)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
double tmp_res;
// res_stat
res->inf_norm_res_stat = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_daxpy(nv[ii], -1.0, mem->ineq_adj + ii, 0, mem->cost_grad + ii, 0, res->res_stat + ii,
0);
blasfeo_daxpy(nu[ii] + nx[ii], -1.0, mem->dyn_adj + ii, 0, res->res_stat + ii, 0,
res->res_stat + ii, 0);
blasfeo_dvecnrm_inf(nv[ii], res->res_stat + ii, 0, &tmp_res);
res->inf_norm_res_stat = tmp_res > res->inf_norm_res_stat ? tmp_res : res->inf_norm_res_stat;
}
// res_eq
res->inf_norm_res_eq = 0.0;
for (int ii = 0; ii < N; ii++)
{
blasfeo_dveccp(nx[ii + 1], mem->dyn_fun + ii, 0, res->res_eq + ii, 0);
blasfeo_dvecnrm_inf(nx[ii + 1], res->res_eq + ii, 0, &tmp_res);
res->inf_norm_res_eq = tmp_res > res->inf_norm_res_eq ? tmp_res : res->inf_norm_res_eq;
}
// res_ineq
res->inf_norm_res_ineq = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_daxpy(2 * ni[ii], 1.0, out->t + ii, 0, mem->ineq_fun + ii, 0, res->res_ineq + ii, 0);
blasfeo_dvecnrm_inf(2 * ni[ii], res->res_ineq + ii, 0, &tmp_res);
res->inf_norm_res_ineq = tmp_res > res->inf_norm_res_ineq ? tmp_res : res->inf_norm_res_ineq;
}
// res_comp
res->inf_norm_res_comp = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_dvecmul(2 * ni[ii], out->lam + ii, 0, out->t + ii, 0, res->res_comp + ii, 0);
blasfeo_dvecnrm_inf(2 * ni[ii], res->res_comp + ii, 0, &tmp_res);
res->inf_norm_res_comp = tmp_res > res->inf_norm_res_comp ? tmp_res : res->inf_norm_res_comp;
}
// printf("computed residuals g: %e, b: %e, d: %e, m: %e\n", res->inf_norm_res_stat, res->inf_norm_res_eq,
// res->inf_norm_res_ineq, res->inf_norm_res_comp);
return;
}
void ocp_nlp_cost_compute(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
// extract dims
int N = dims->N;
double* tmp_cost = NULL;
double total_cost = 0.0;
for (int ii = 0; ii <= N; ii++)
{
// set pointers
// NOTE(oj): the cost compute function takes the tmp_ux_ptr as input,
// since it is also used for globalization,
// especially with primal variables that are NOT current SQP iterates.
config->cost[ii]->memory_set_tmp_ux_ptr(out->ux+ii, mem->cost[ii]);
config->cost[ii]->compute_fun(config->cost[ii], dims->cost[ii], in->cost[ii],
opts->cost[ii], mem->cost[ii], work->cost[ii]);
tmp_cost = config->cost[ii]->memory_get_fun_ptr(mem->cost[ii]);
// printf("cost at stage %d = %e, total = %e\n", ii, *tmp_cost, total_cost);
total_cost += *tmp_cost;
}
mem->cost_value = total_cost;
// printf("\ncomputed total cost: %e\n", total_cost);
return;
}
|
rose_v1_globalArrays.c | #define MSIZE 256
#include <omp.h>
double u[256][256];
double f[256][256];
int n;
int m;
void initialize()
{
int i;
int j;
int xx;
n = 256;
m = 256;
double dx = 2.0 / (n - 1);
#pragma omp parallel for private (xx,i,j) firstprivate (n,m)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (xx,j) firstprivate (dx)
for (j = 0; j <= m - 1; j += 1) {
xx = ((int )(- 1.0 + dx * (i - 1)));
u[i][j] = 0.0;
f[i][j] = - 1.0 * (1.0 - (xx * xx));
}
}
}
|
GB_binop__bshift_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint16)
// C=scalar+B GB (_bind1st__bshift_uint16)
// C=scalar+B' GB (_bind1st_tran__bshift_uint16)
// C=A+scalar GB (_bind2nd__bshift_uint16)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bshift_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bshift_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bshift_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bshift_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bshift_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_uint16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bshift_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_uint16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint16 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__bshift_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint16 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_gsmg.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Geometrically smooth interpolation multigrid
*
*****************************************************************************/
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#ifdef HYPRE_USING_ESSL
#include <essl.h>
#else
#include "fortran.h"
#ifdef __cplusplus
extern "C" {
#endif
HYPRE_Int hypre_F90_NAME_LAPACK(dgels, DGELS)(char *, HYPRE_Int *, HYPRE_Int *, HYPRE_Int *, HYPRE_Real *,
HYPRE_Int *, HYPRE_Real *, HYPRE_Int *, HYPRE_Real *, HYPRE_Int *, HYPRE_Int *);
#ifdef __cplusplus
}
#endif
#endif
#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
HYPRE_Real temp = 0.;
HYPRE_Int i;
for (i=0; i<n; i++)
temp = temp + x[i]*x[i];
return sqrt(temp);
}
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
HYPRE_Int i;
for (i=0; i<n; i++)
x[i] = a * x[i];
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixClone
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **Sp,
HYPRE_Int copy_data)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int num_nonzeros_diag = A_diag_i[n];
HYPRE_Int num_nonzeros_offd = A_offd_i[n];
hypre_ParCSRMatrix *S;
S = hypre_ParCSRMatrixCreate(comm, n, n, row_starts, row_starts,
num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
hypre_ParCSRMatrixSetRowStartsOwner(S,0);
hypre_ParCSRMatrixInitialize(S); /* allocate memory */
hypre_ParCSRMatrixCopy(A,S,copy_data);
*Sp = S;
return 0;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFillSmooth
* - fill in smooth matrix
* - this function will scale the smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A,
HYPRE_Int num_functions, HYPRE_Int *dof_func)
{
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int i, j, k, ii, index, start;
HYPRE_Int num_cols_offd;
HYPRE_Int num_sends;
HYPRE_Int *dof_func_offd;
HYPRE_Int *int_buf_data;
HYPRE_Real temp;
HYPRE_Real *p;
HYPRE_Real *p_offd;
HYPRE_Real *p_ptr;
HYPRE_Real *buf_data;
HYPRE_Real nm;
#if 0
HYPRE_Real mx = 0., my = 1.e+10;
#endif
/* normalize each sample vector and divide by number of samples */
for (k=0; k<nsamples; k++)
{
nm = mydnrm2(n, samples+k*n);
nm = 1./nm/nsamples;
mydscal(n, nm, samples+k*n);
}
num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
buf_data = hypre_CTAlloc(HYPRE_Real,hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd);
p_ptr = p_offd;
p = samples;
for (k = 0; k < nsamples; k++)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
buf_data[index++]
= p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data,
p_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
p = p+n;
p_offd = p_offd+num_cols_offd;
}
hypre_TFree(buf_data);
if (num_functions > 1)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
int_buf_data = hypre_CTAlloc(HYPRE_Int,hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data);
}
for (i = 0; i < n; i++)
{
for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
{
ii = S_diag_j[j];
/* only interpolate between like functions */
if (num_functions > 1 && dof_func[i] != dof_func[ii])
{
S_diag_data[j] = 0.;
continue;
}
/* explicit zeros */
if (A_diag_data[j] == 0.)
{
S_diag_data[j] = 0.;
continue;
}
temp = 0.;
p = samples;
for (k=0; k<nsamples; k++)
{
temp = temp + ABS(p[i] - p[ii]);
p = p + n;
}
/* explicit zeros in matrix may cause this */
if (temp == 0.)
{
S_diag_data[j] = 0.;
continue;
}
temp = 1./temp; /* reciprocal */
#if 0
my = hypre_min(my,temp);
mx = hypre_max(mx,temp);
#endif
S_diag_data[j] = temp;
}
for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
ii = S_offd_j[j];
/* only interpolate between like functions */
if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
{
S_offd_data[j] = 0.;
continue;
}
/* explicit zeros */
if (A_offd_data[j] == 0.)
{
S_offd_data[j] = 0.;
continue;
}
temp = 0.;
p = samples;
p_offd = p_ptr;
for (k=0; k<nsamples; k++)
{
temp = temp + ABS(p[i] - p_offd[ii]);
p = p + n;
p_offd = p_offd + num_cols_offd;
}
/* explicit zeros in matrix may cause this */
if (temp == 0.)
{
S_offd_data[j] = 0.;
continue;
}
temp = 1./temp; /* reciprocal */
#if 0
my = hypre_min(my,temp);
mx = hypre_max(mx,temp);
#endif
S_offd_data[j] = temp;
}
}
#if 0
hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif
hypre_TFree(p_ptr);
if (num_functions > 1)
hypre_TFree(dof_func_offd);
return 0;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixChooseThresh
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int i, j;
HYPRE_Real mx, minimax = 1.e+10;
HYPRE_Real minmin;
for (i=0; i<n; i++)
{
mx = 0.;
for (j=S_diag_i[i]; j<S_diag_i[i+1]; j++)
mx = hypre_max(mx, S_diag_data[j]);
for (j=S_offd_i[i]; j<S_offd_i[i+1]; j++)
mx = hypre_max(mx, S_offd_data[j]);
if (mx != 0.)
minimax = hypre_min(minimax, mx);
}
hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm);
return minmin;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixThreshold
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nonzeros_diag = A_diag_i[n];
HYPRE_Int num_nonzeros_offd = A_offd_i[n];
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
HYPRE_Int *S_offd_i;
HYPRE_Int *S_offd_j;
HYPRE_Real *S_offd_data;
HYPRE_Int count, i, jS, jA;
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_diag; i++)
if (A_diag_data[i] >= thresh)
count++;
/* allocate vectors */
S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1);
S_diag_j = hypre_CTAlloc(HYPRE_Int, count);
S_diag_data = hypre_CTAlloc(HYPRE_Real, count);
jS = 0;
for (i = 0; i < n; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= thresh)
{
S_diag_data[jS] = A_diag_data[jA];
S_diag_j[jS] = A_diag_j[jA];
jS++;
}
}
}
S_diag_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_diag) = jS;
/* free the vectors we don't need */
hypre_TFree(A_diag_i);
hypre_TFree(A_diag_j);
hypre_TFree(A_diag_data);
/* assign the new vectors */
hypre_CSRMatrixI(A_diag) = S_diag_i;
hypre_CSRMatrixJ(A_diag) = S_diag_j;
hypre_CSRMatrixData(A_diag) = S_diag_data;
/*
* Offd part
*/
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_offd; i++)
if (A_offd_data[i] >= thresh)
count++;
/* allocate vectors */
S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1);
S_offd_j = hypre_CTAlloc(HYPRE_Int, count);
S_offd_data = hypre_CTAlloc(HYPRE_Real, count);
jS = 0;
for (i = 0; i < n; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= thresh)
{
S_offd_data[jS] = A_offd_data[jA];
S_offd_j[jS] = A_offd_j[jA];
jS++;
}
}
}
S_offd_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_offd) = jS;
/* free the vectors we don't need */
hypre_TFree(A_offd_i);
hypre_TFree(A_offd_j);
hypre_TFree(A_offd_data);
/* assign the new vectors */
hypre_CSRMatrixI(A_offd) = S_offd_i;
hypre_CSRMatrixJ(A_offd) = S_offd_j;
hypre_CSRMatrixData(A_offd) = S_offd_data;
return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothVecs
* - smoother depends on the level being used
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
hypre_ParCSRMatrix *A,
HYPRE_Int num_sweeps,
HYPRE_Int level,
HYPRE_Real **SmoothVecs_p)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_ParVector *Zero;
hypre_ParVector *Temp;
hypre_ParVector *U;
hypre_ParVector *Qtemp = NULL;
HYPRE_Int i;
HYPRE_Int n = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int sample;
HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data);
HYPRE_Int ret;
HYPRE_Real *datax, *bp, *p;
HYPRE_Int rlx_type;
HYPRE_Int smooth_type;
HYPRE_Int smooth_option = 0;
HYPRE_Int smooth_num_levels;
HYPRE_Solver *smoother;
HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
HYPRE_Int num_threads;
num_threads = hypre_NumThreads();
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (debug_flag >= 1)
hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps,
nsamples);
smooth_type = hypre_ParAMGDataSmoothType(amg_data);
smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
if (smooth_num_levels > level)
{
smooth_option = smooth_type;
smoother = hypre_ParAMGDataSmoother(amg_data);
num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
}
rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
/* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
/* omega = hypre_ParAMGDataOmega(amg_data)[level]; */
/* generate par vectors */
Zero = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorSetPartitioningOwner(Zero,0);
hypre_ParVectorInitialize(Zero);
datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
for (i=0; i<n_local; i++)
datax[i] = 0.;
Temp = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorSetPartitioningOwner(Temp,0);
hypre_ParVectorInitialize(Temp);
datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
for (i=0; i<n_local; i++)
datax[i] = 0.;
U = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorSetPartitioningOwner(U,0);
hypre_ParVectorInitialize(U);
datax = hypre_VectorData(hypre_ParVectorLocalVector(U));
if (num_threads > 1)
{
Qtemp = hypre_ParVectorCreate(comm, n, starts);
hypre_ParVectorInitialize(Qtemp);
hypre_ParVectorSetPartitioningOwner(Qtemp,0);
}
/* allocate space for the vectors */
bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local);
p = bp;
/* generate random vectors */
for (sample=0; sample<nsamples; sample++)
{
for (i=0; i<n_local; i++)
datax[i] = (rand()/(HYPRE_Real)RAND_MAX) - .5;
for (i=0; i<num_sweeps; i++)
{
if (smooth_option == 6)
{
HYPRE_SchwarzSolve(smoother[level],
(HYPRE_ParCSRMatrix) A,
(HYPRE_ParVector) Zero,
(HYPRE_ParVector) U);
}
else
{
ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
rlx_type , 0 /*rel pts*/, 1.0 /*weight*/,
1.0 /*omega*/, NULL, U, Temp,
Qtemp);
hypre_assert(ret == 0);
}
}
/* copy out the solution */
for (i=0; i<n_local; i++)
*p++ = datax[i];
}
hypre_ParVectorDestroy(Zero);
hypre_ParVectorDestroy(Temp);
hypre_ParVectorDestroy(U);
if (num_threads > 1)
hypre_ParVectorDestroy(Qtemp);
*SmoothVecs_p = bp;
return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothDirs replaces CreateS in AMG
* - smoother depends on the level being used
* - in this version, CreateSmoothVecs must be called prior to this function
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
hypre_ParCSRMatrix *A,
HYPRE_Real *SmoothVecs,
HYPRE_Real thresh,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
hypre_ParCSRMatrix **S_ptr)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
hypre_ParCSRMatrix *S;
HYPRE_Real minimax;
HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
hypre_ParCSRMatrixClone(A, &S, 0);
/* Traverse S and fill in differences */
hypre_ParCSRMatrixFillSmooth(
hypre_ParAMGDataNumSamples(amg_data), SmoothVecs,
S, A, num_functions, dof_func);
minimax = hypre_ParCSRMatrixChooseThresh(S);
if (debug_flag >= 1)
hypre_printf("Minimax chosen: %f\n", minimax);
/* Threshold and compress */
hypre_ParCSRMatrixThreshold(S, thresh*minimax);
*S_ptr = S;
return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGNormalizeVecs
*
* Normalize the smooth vectors and also make the first vector the constant
* vector
*
* inputs:
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
*
* output:
* V = adjusted smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
HYPRE_Int i, j;
HYPRE_Real nrm;
/* change first vector to the constant vector */
for (i=0; i<n; i++)
V[i] = 1.0;
for (j=0; j<num; j++)
{
nrm = mydnrm2(n, &V[j*n]);
mydscal(n, 1./nrm, &V[j*n]);
}
return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGFitVectors
*
* Construct interpolation weights based on fitting smooth vectors
*
* inputs:
* ip = row number of row in P being processed (0-based)
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
* nc = number of coarse grid points
* ind = indices of coarse grid points (0-based)
*
* output:
* val = interpolation weights for the coarse grid points
* V = smooth vectors; first one has been changed to constant vector;
* vectors have also been normalized; this is also an input
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V,
HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val)
{
HYPRE_Real *a, *b;
HYPRE_Real *ap;
HYPRE_Int i, j;
HYPRE_Real *work;
HYPRE_Int work_size;
HYPRE_Int info;
HYPRE_Int temp;
/*
hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
for (i=0; i<nc; i++)
hypre_printf("%d ", ind[i]);
hypre_printf("\n");
*/
if (nc == 0)
return 0;
work_size = 2000*64;
work = hypre_CTAlloc(HYPRE_Real, work_size);
a = hypre_CTAlloc(HYPRE_Real, num*nc);
ap = a;
for (j=0; j<nc; j++)
{
for (i=0; i<num; i++)
{
*ap = V[i*n+ind[j]];
ap++;
}
}
temp = MAX(nc, num);
b = hypre_CTAlloc(HYPRE_Real, temp);
for (i=0; i<num; i++)
b[i] = V[i*n+ip];
#ifdef HYPRE_USING_ESSL
dgells(0, a, num, b, num, val, nc, NULL, 1.e-12, num, nc, 1,
&info, work, work_size);
#else
{
char trans = 'N';
HYPRE_Int one = 1;
hypre_F90_NAME_LAPACK(dgels, DGELS)(&trans, &num, &nc, &one, a, &num,
b, &temp, work, &work_size, &info);
if (info != 0)
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned %d\n");
/* copy solution into output vector */
for (j=0; j<nc; j++)
val[j] = b[j];
}
#endif
hypre_TFree(b);
hypre_TFree(a);
hypre_TFree(work);
return info;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpLS
*
* Interpolation built from fitting smooth vectors
* - sequential version only
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int num_smooth,
HYPRE_Real *SmoothVecs,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
/* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
/* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
/* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
/* HYPRE_Real *S_ext_data;
HYPRE_Int *S_ext_i;
HYPRE_Int *S_ext_j; */
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker;
/* HYPRE_Int *P_marker_offd; */
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
/* HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd; */
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of S
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
/*
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixJ(S_ext);
S_ext_data = hypre_CSRMatrixData(S_ext);
*/
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
/* removed */
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
HYPRE_Int kk;
HYPRE_Int indices[1000]; /* kludge */
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
kk = 0;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i1];
jj_counter++;
indices[kk] = i1;
kk++;
}
}
hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
kk, indices, &P_diag_data[P_diag_i[i]]);
/* Off-Diagonal part of P */
/* undone */
}
}
}
P_diag_i[i] = jj_counter; /* check that this is in right place for threads */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
for (i=0; i < num_cols_P_offd; i++)
col_map_offd_P[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpGSMG
*
* Difference with hypre_BoomerAMGBuildInterp is that S contains values
* and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
HYPRE_Real *S_ext_data;
HYPRE_Int *S_ext_i;
HYPRE_Int *S_ext_j;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int start;
HYPRE_Int c_num;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(S);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
total_global_cpts = 0; /* we will set this later for the matrix in the setup */
/* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of S
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixJ(S_ext);
S_ext_data = hypre_CSRMatrixData(S_ext);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd);
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_S_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
/* Loop over ith row of S. First, the diagonal part of S */
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += S_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
sum += S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
sum += S_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = S_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
P_diag_data[P_marker[i2]]
+= distribute * S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i2]]
+= distribute * S_offd_data[jj1];
}
}
}
else
{
/* do nothing */
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else
{
/* do nothing */
}
}
/*----------------------------------------------------------------
* Still looping over ith row of S. Next, loop over the
* off-diagonal part of S
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = S_offd_j[jj];
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
i2 = S_ext_j[jj1];
if (i2 >= col_1 && i2 < col_n)
{
/* in the diagonal block */
if (P_marker[i2-col_1] >= jj_begin_row)
sum += S_ext_data[jj1];
}
else
{
/* in the off_diagonal block */
j = hypre_BinarySearch(col_map_offd,i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
sum += S_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = S_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
i2 = S_ext_j[jj1];
if (i2 >= col_1 && i2 < col_n) /* in the diagonal block */
{
if (P_marker[i2-col_1] >= jj_begin_row)
P_diag_data[P_marker[i2-col_1]]
+= distribute * S_ext_data[jj1];
}
else
{
/* check to see if it is in the off_diagonal block */
j = hypre_BinarySearch(col_map_offd,i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[j]]
+= distribute * S_ext_data[jj1];
}
}
}
}
else
{
/* do nothing */
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else
{
/* do nothing */
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
sum = 0.;
for (jj = jj_begin_row; jj < jj_end_row; jj++)
sum += P_diag_data[jj];
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
sum += P_offd_data[jj];
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= sum;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= sum;
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
for (i=0; i < num_cols_P_offd; i++)
col_map_offd_P[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
|
omp_taskwait.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
int test_omp_taskwait()
{
int result1 = 0; /* Stores number of not finished tasks after the taskwait */
int result2 = 0; /* Stores number of wrong array elements at the end */
int array[NUM_TASKS];
int i;
/* fill array */
for (i = 0; i < NUM_TASKS; i++)
array[i] = 0;
#pragma omp parallel
{
#pragma omp single
{
for (i = 0; i < NUM_TASKS; i++) {
/* First we have to store the value of the loop index in a new variable
* which will be private for each task because otherwise it will be overwritten
* if the execution of the task takes longer than the time which is needed to
* enter the next step of the loop!
*/
int myi;
myi = i;
#pragma omp task
{
my_sleep (SLEEPTIME);
array[myi] = 1;
} /* end of omp task */
} /* end of for */
#pragma omp taskwait
/* check if all tasks were finished */
for (i = 0; i < NUM_TASKS; i++)
if (array[i] != 1)
result1++;
/* generate some more tasks which now shall overwrite
* the values in the tids array */
for (i = 0; i < NUM_TASKS; i++) {
int myi;
myi = i;
#pragma omp task
{
array[myi] = 2;
} /* end of omp task */
} /* end of for */
} /* end of single */
} /*end of parallel */
/* final check, if all array elements contain the right values: */
for (i = 0; i < NUM_TASKS; i++) {
if (array[i] != 2)
result2++;
}
return ((result1 == 0) && (result2 == 0));
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_taskwait()) {
num_failed++;
}
}
return num_failed;
}
|
conv_func_helper.h | /* Copyright (c) 2018 Anakin Authors, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef ANAKIN_TEST_SABER_CONV_FUNC_HELPER_H
#define ANAKIN_TEST_SABER_CONV_FUNC_HELPER_H
#include <vector>
#include "saber/core/context.h"
#include "saber/core/tensor.h"
#include "saber/saber_funcs_param.h"
#include "saber/funcs/conv.h"
#include "saber/saber_types.h"
#include "saber/funcs/saber_util.h"
namespace anakin {
namespace saber {
template<typename targetType>
void pool_basic_check_int8(Tensor<targetType> &tensor_in,Tensor<targetType> &tensor_out,
int kernel_w, int kernel_h, int stride_w, int stride_h,
int pad_w, int pad_h, PoolingType pooling_type, round_mode rm = nearest) {
CHECK(tensor_in.get_dtype()==AK_UINT8||tensor_in.get_dtype()==AK_INT8)<<"only support int8 in";
CHECK(tensor_out.get_dtype()==AK_UINT8||tensor_out.get_dtype()==AK_INT8)<<"only support int8 out";
auto src_ptr = static_cast<const char *>(tensor_in.data());
auto dst_ptr = static_cast<char *>(tensor_out.mutable_data());
int in_n = tensor_in.num();
int in_c = tensor_in.channel();
int in_h = tensor_in.height();
int in_w = tensor_in.width();
int size_in_n = in_c * in_h * in_w;
int size_in_c = 1;
int out_h = tensor_out.height();
int out_w = tensor_out.width();
int size_out_n = in_c * out_h * out_w;
int size_out_c = 1;
for (int ind_n = 0; ind_n < in_n; ++ind_n) {
for (int ind_h = 0; ind_h < out_h; ++ind_h) {
int sh = ind_h * stride_h;
int eh = sh + kernel_h;
if (pad_h > 0) {
sh = (sh-pad_h) < 0 ? 0 : sh-pad_h;
eh = (eh-pad_h) > in_h ? in_h : eh-pad_h;
}
for (int ind_w = 0; ind_w < out_w; ++ind_w) {
int sw = ind_w * stride_w;
int ew = sw + kernel_w;
if (pad_w > 0) {
sw = (sw - pad_w) < 0 ? 0 : sw-pad_w;
ew = (ew - pad_w) > in_w ? in_w:ew-pad_w;
}
float result = 0;
for (int ind_c = 0; ind_c < in_c; ++ind_c) {
int dst_ind = ind_n * size_out_n + ind_h * out_w * in_c + ind_w * in_c + ind_c;
for (int kh = sh; kh < eh; ++kh) {
for (int kw = sw; kw < ew; ++kw) {
int src_ind = ind_n * size_in_n + kh * in_w * in_c + kw * in_c + ind_c;
if (kh == sh && kw == sw) {
result = src_ptr[src_ind];
} else {
if (pooling_type == Pooling_max) {
result = result >= src_ptr[src_ind] ? result : src_ptr[src_ind];
}
if (pooling_type == Pooling_average_include_padding) {
result += src_ptr[src_ind];
}
if (pooling_type == Pooling_average_exclude_padding) {
result += src_ptr[src_ind];
}
}
}
}
if (pooling_type == Pooling_average_include_padding) {
result /= kernel_h * kernel_w;
}
if (pooling_type == Pooling_average_exclude_padding) {
result /= (ew-sw) * (eh-sh);
}
dst_ptr[dst_ind] = (unsigned char)nearbyintf(result);
}
}
}
}
}
template<typename targetType>
void conv_basic_check_int8(Tensor<targetType> &tensor_in,Tensor<targetType> &tensor_out,
const char *weights, const int *bias, int group,
int kernel_w, int kernel_h, int stride_w, int stride_h, int dilation_w, int dilation_h,
int pad_w, int pad_h, bool flag_bias, bool flag_relu, std::vector<float> &scale, EltwiseParam<targetType> *elt_param = NULL,
float beta = 0.f, round_mode rm = nearest) {
auto src_data_uint8 = reinterpret_cast<const unsigned char*>(tensor_in.data());
auto src_data_int8 = reinterpret_cast<const char*>(tensor_in.data());
auto dst_data_ref = reinterpret_cast<char*>(tensor_out.mutable_data());
auto weights_data = weights;
bool with_bias = flag_bias;
auto bias_data = bias;
int in_num = tensor_out.num();
int out_channels = tensor_out.channel();
int out_h = tensor_out.height();
int out_w = tensor_out.width();
int in_channel = tensor_in.channel();
int in_h = tensor_in.height();
int in_w = tensor_in.width();
int out_c_group = out_channels / group;
int in_c_group = in_channel / group;
float sum_scale = 1.f;
if (elt_param && (elt_param->operation == Eltwise_sum)) {
sum_scale = elt_param->coeff[1];
}
if (tensor_in.get_layout() == Layout_NHWC && tensor_out.get_layout() == Layout_NHWC) {
#pragma omp parallel for num_threads(8) collapse(5) schedule(static)
for (int n = 0; n < in_num; ++n) {
for (int oh = 0; oh < out_h; ++oh) {
for (int ow = 0; ow < out_w; ++ow) {
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
int out_idx = n * out_h * out_w * group * out_c_group
+ oh * out_w * group * out_c_group + ow * group * out_c_group + g * out_c_group + oc;
float bias_d = with_bias ? (float)(bias_data[g * out_c_group + oc]) : 0.f;
float computing_v = bias_d + dst_data_ref[out_idx] * beta;
for (int ic = 0; ic < in_c_group; ++ic) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
int iw = ow * stride_w - pad_w + kw * (dilation_w);
int ih = oh * stride_h - pad_h + kh * (dilation_h);
if (iw < 0 || iw >= in_w) continue;
if (ih < 0 || ih >= in_h) continue;
int iidx = n * in_h * in_w * in_channel
+ ih * in_w * group * in_c_group
+ iw * group * in_c_group
+ g * in_c_group
+ ic;
int widx = g * out_c_group * in_c_group * kernel_h * kernel_w
+ oc * in_c_group * kernel_h * kernel_w
+ ic * kernel_h * kernel_w
+ kh * kernel_w
+ kw;
if (tensor_in.get_dtype() == AK_INT8) {
computing_v += (float)src_data_int8[iidx] * weights_data[widx];
}
else {
computing_v += (float)src_data_uint8[iidx] * weights_data[widx];
}
}
}
}
computing_v = computing_v * scale[g * out_c_group + oc];
if (elt_param && (elt_param->operation == Eltwise_sum)) {
computing_v += dst_data_ref[out_idx] * sum_scale;
}
if (flag_relu) {
computing_v = computing_v > 0.f ? computing_v : 0.f;
}
switch (rm) {
case nearest: dst_data_ref[out_idx] = saturate<int8_t>((int32_t)nearbyintf(computing_v)); break;
case down: dst_data_ref[out_idx] = saturate<int8_t>((int32_t)floorf(computing_v)); break ;
}
// LOG(INFO) << "computing_v:" << computing_v << " scale[g*out_c_group + oc]" << scale[g*out_c_group + oc] << " out_idx:" << out_idx;
// LOG(INFO) << "out_idx:" << out_idx << " dst_data_ref[out_idx]:" << (int)dst_data_ref[out_idx];
}
}
}
}
}
}
}
template<typename targetType, typename out_dtype = float, typename in_dtype = out_dtype>
void conv_basic_check(Tensor<targetType> &tensor_in,Tensor<targetType> &tensor_out,
const in_dtype *weights, const out_dtype *bias, int group,
int kernel_w, int kernel_h, int stride_w, int stride_h, int dilation_w, int dilation_h,
int pad_w, int pad_h, bool flag_bias, bool flag_relu, float beta = 0.f, float alpha = 1.f) {
auto src_data = reinterpret_cast<const in_dtype*>(tensor_in.data());
auto dst_data_ref = reinterpret_cast<out_dtype*>(tensor_out.mutable_data());
auto weights_data = weights;
bool with_bias = flag_bias;
auto bias_data = bias;
int in_num = tensor_out.num();
int out_channels = tensor_out.channel();
int out_h = tensor_out.height();
int out_w = tensor_out.width();
int in_channel = tensor_in.channel();
int in_h = tensor_in.height();
int in_w = tensor_in.width();
int out_c_group = out_channels / group;
int in_c_group = in_channel / group;
#pragma omp parallel for num_threads(8) collapse(5) schedule(static)
for (int n = 0; n < in_num; ++n) {
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < out_h; ++oh) {
for (int ow = 0; ow < out_w; ++ow) {
int out_idx = n * group * out_c_group * out_h * out_w + g * out_c_group * out_h * out_w
+ oc * out_h * out_w + oh * out_w + ow;
float bias_d = with_bias ? (float)(bias_data[g * out_c_group + oc]) : 0.f;
dst_data_ref[out_idx] = dst_data_ref[out_idx] * beta;
for (int ic = 0; ic < in_c_group; ++ic) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
int iw = ow * stride_w - pad_w + kw * (dilation_w);
int ih = oh * stride_h - pad_h + kh * (dilation_h);
if (iw < 0 || iw >= in_w) continue;
if (ih < 0 || ih >= in_h) continue;
int iidx = n * in_channel * in_h * in_w
+ g * in_c_group * in_h * in_w
+ ic * in_h * in_w
+ ih * in_w
+ iw;
int widx = g * out_c_group * in_c_group * kernel_h * kernel_w
+ oc * in_c_group * kernel_h * kernel_w
+ ic * kernel_h * kernel_w
+ kh * kernel_w
+ kw;
dst_data_ref[out_idx]
+= (out_dtype)src_data[iidx]
* (out_dtype)weights_data[widx];
// LOG(INFO) << "out_idx = " << out_idx << " iidx = " << iidx << " res = " << dst_data_ref[out_idx];
}
}
}
dst_data_ref[out_idx] *= alpha;
dst_data_ref[out_idx] += bias_d;
if (flag_relu) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > 0.f ? dst_data_ref[out_idx] : 0.f;
}
}
}
}
}
}
}
template<typename dtype,typename TargetType_D,typename TargetType_H>
void conv_cpu_func(const std::vector<Tensor<TargetType_H>*>& input,
std::vector<Tensor<TargetType_H>*>& output,
ConvParam<TargetType_D>& param) {
int group = param.group;
int input_num = input[0]->num();
int input_channel = input[0]->channel();
int input_height = input[0]->height();
int input_width = input[0]->width();
int output_channel = output[0]->channel();
int output_height = output[0]->height();
int output_width = output[0]->width();
int stride_h = param.stride_h;
int stride_w = param.stride_w;
int dilation_h = param.dilation_h;
int dilation_w = param.dilation_w;
int pad_h = param.pad_h;
int pad_w = param.pad_w;
int kernel_h = param.weight()->height();
int kernel_w = param.weight()->width();
bool bias_term = param.bias()->valid_size() > 0;
bool with_relu = param.activation_param.has_active;
Tensor<TargetType_H> weights_host;
Tensor<TargetType_H> bias_host;
weights_host.re_alloc(param.weight()->valid_shape(), AK_FLOAT);
weights_host.copy_from(*(param.weight()));
bias_host.re_alloc(param.bias()->valid_shape(), AK_FLOAT);
bias_host.copy_from(*(param.bias()));
const dtype* bias_ptr = bias_term ? (const float*)bias_host.data() : nullptr;
conv_basic_check<TargetType_H>(*input[0], *output[0],
(const dtype*)weights_host.data(), bias_ptr,
group, kernel_w, kernel_h, stride_w, stride_h,
dilation_w, dilation_h, pad_w, pad_h, bias_term,
with_relu);
}
}
}
#endif //ANAKIN_CONV_FUNC_HELPER_H
|
central_difference.h | #include <stdio.h>
static inline long long SUB2IND(const long long j, const long long i, const long long k,
const long long row_size, const long long col_size,
const long long n_channels) {
//return ((i + col_size * j) * n_channels) + k;
return (i + col_size * j) + (row_size * col_size * k);
}
template<typename T>
void central_difference(const T* in, const long long rows,
const long long cols, const long long n_channels,
T* out) {
const long long n_output_channels = n_channels * 2;
long long output_index = 0;
#pragma omp for
for (long long k = 0; k < n_channels; ++k) {
// row-derivative
for (long long i = 0; i < cols; ++i) {
for (long long j = 0; j < rows; ++j) {
if (j == 0) {
output_index = SUB2IND(0, i, k, rows, cols, n_output_channels);
out[output_index] = in[SUB2IND(1, i, k, rows, cols, n_channels)] - in[SUB2IND(0, i, k, rows, cols, n_channels)];
}
else if (j == rows - 1) {
output_index = SUB2IND(j, i, k, rows, cols, n_output_channels);
out[output_index] = in[SUB2IND(j, i, k, rows, cols, n_channels)] - in[SUB2IND(j - 1, i, k, rows, cols, n_channels)];
}
else {
output_index = SUB2IND(j, i, k, rows, cols, n_output_channels);
out[output_index] = (in[SUB2IND(j + 1, i, k, rows, cols, n_channels)] - in[SUB2IND(j - 1, i, k, rows, cols, n_channels)]) / 2.0;
}
}
}
// column-derivative
for (long long j = 0; j < rows; ++j) {
for (long long i = 0; i < cols; ++i) {
if (i == 0) {
output_index = SUB2IND(j, 0, n_channels + k , rows, cols, n_output_channels);
out[output_index] = in[SUB2IND(j, 1, k, rows, cols, n_channels)] - in[SUB2IND(j, 0, k, rows, cols, n_channels)];
}
else if (i == cols - 1) {
output_index = SUB2IND(j, cols - 1, n_channels + k, rows, cols, n_output_channels);
out[output_index] = in[SUB2IND(j, i, k, rows, cols, n_channels)] - in[SUB2IND(j, i - 1, k, rows, cols, n_channels)];
}
else {
output_index = SUB2IND(j, i, n_channels + k, rows, cols, n_output_channels);
out[output_index] = (in[SUB2IND(j, i + 1, k, rows, cols, n_channels)] - in[SUB2IND(j, i - 1, k, rows, cols, n_channels)]) / 2.0;
}
}
}
}
}
|
buildingGABLS.c | #include "grid/octree.h"
#include "navier-stokes/centered.h"
#include "SGS/Vreman.h"
#include "tracer.h"
#include "diffusion.h"
#include "grid/adaptLES.h"
#define MAXLEVEL 5
#define temp 5
#define Re 1e3
#define sq(x) x * x
scalar T[];
scalar * tracers = {T};
mgstats mgT;
face vector av[];
double f = 1.39e-4;
int main()
{
T[bottom]=dirichlet((265-(0.25*(t/3600))));
T[top] = dirichlet (268);
u.t[bottom]=dirichlet(0);
u.t[top]=neumann(0);
periodic (right);
periodic (front);
L0 = 400.;
X0 = Y0 = Z0 = 0;
DT = 10;
init_grid (1 << MAXLEVEL);
a=av;
run();
}
face vector muv[];
event init(t=0)
{
adaptLESinit(MAXLEVEL);
mu=muv;
foreach()
{
u.x[]= 8;
T[]=((265+0.1*noise())*(y<50))+((y>50)*265) + (((0.01*(y-100)))*(y>100));
}
boundary(all);
}
event acceleration (i++)
{
face vector D = mu;
mgT = diffusion (T, dt, D);
foreach()
{
av.x[] = f*u.z[]-((y>300)*(u.x[]-8));
av.y[] = 9.81*(T[]+T[0,-1,0])/(2*265)-((y>300)*u.y[]);
av.z[] = f*(8-u.x[])-((y>300)*u.z[]);
}
boundary(all);
}
event SGS (i++)
{
scalar muB[];
eddyviscosity(0.17,u,muB);
boundary({muB});
foreach_face()
{
muv.x[]=(muB[]+muB[-1,0,0])/2;
}
boundary((scalar *){muv});
}
event logfile (t <= temp; t += 1)
{
stats s = statsf (u.x);
stats m = statsf(mu.x);
fprintf (ferr, "t = %g\ti = %d\tdt = %g\tmin(Evis) = %g\nmax(Evis) = %g\tavg(Evis) = %g\tavg(ux) = %g \n",t, i, dt,m.min ,m.max,(float)m.sum/(L0*L0*L0), (float)s.sum/(L0*L0*L0));
}
event output (t += 1)
{
float Delt = L0/N;
char name[80];
sprintf (name, "Tprof%d.dat",(int)roundf(t));
FILE * fp4 = fopen (name, "w");
sprintf (name, "uxprof%d.dat",(int)roundf(t));
FILE * fp5 = fopen (name, "w");
sprintf (name, "uzprof%d.dat",(int)roundf(t));
FILE * fp6 = fopen (name, "w");
sprintf (name, "LEVELprof%d.dat",(int)roundf(t));
FILE * fp7 = fopen (name, "w");
sprintf (name, "Yprof%d.dat",(int)roundf(t));
FILE * fp8 = fopen (name, "w");
#ifdef _OPENMP
#pragma omp parallel for reduction(+:F) default(shared)
#endif
for (int k = 0; k < N; k++)
{
double uz=0 , ux=0 , F= 0, LEVEL=0, yz=0; ;
float yp = Delt * k + Y0 + Delt/2;
for (int i = 0; i < N; i++)
{
float xp = Delt*i + X0 + Delt/2.;
for (int j = 0; j < N; j++)
{
float zp = Delt*j + X0 + Delt/2.;
Point point = locate (xp, yp,zp);
F += T[];
uz+= u.z[];
ux+= u.x[];
LEVEL += level;
yz += y;
}
}
fprintf(fp4,"%g\t%g \n",yp,F/(N*N));
fprintf(fp5,"%g\t%g \n",yp,ux/(N*N));
fprintf(fp6,"%g\t%g \n",yp,uz/(N*N));
fprintf(fp7,"%g\t%g \n",yp,LEVEL/(N*N));
fprintf(fp8,"%g\t%g \n",yp,yz/(N*N));
}
fclose(fp4);
fclose(fp5);
fclose(fp6);
fclose(fp7);
fclose(fp8);
}
void levels(double YY[],int LL[],int maxlevel)
{
int DD = pow(2,maxlevel), l = 0;
float xp = Z0+(L0/(DD*2)), zp = Z0 + (L0/(DD*2));
for (int k=0;k<DD;k++)
{
float yp = Y0+(L0/(DD*2))+k*(L0/DD);
Point point = locate (xp, yp,zp);
if (k==0)
{
YY[l]=y;
LL[l]=level;
l++;
}
else if (y!=YY[l-1] )
{
YY[l]=y;
LL[l]=level;
l++;
}
}
}
event adapt (t+=1)
{
double Yz[1<<MAXLEVEL] = {0};
int Lz[1<<MAXLEVEL] = {0};
levels(Yz,Lz,MAXLEVEL);
int m=0 , n=0;
while(Yz[m]>0.01)
{
fprintf(ferr,"\t%d\t%g\t%d \n ",m+1,Yz[m] ,Lz[m]);
n+=pow(2,Lz[m]*2);
m++;
}
fprintf(ferr,"\t#gridcells ~ %g ^ 3\n",round(pow(n,(0.33333))));
adaptLESrun(MAXLEVEL,u,T,mu.y,Lz,Yz);
boundary(all);
}
|
superlu_iterative_solver.h | /*
* =======================================================================*
* kkkk kkkk kkkkkkkkkk kkkkk kkkkkkkkkk kkkkkkkkkk kkkkkkkkkK *
* kkkk kkkk kkkk kkkk kkkkkk kkkkkkkkkk kkkkkkkkkk kkkkkkkkkK *
* kkkkkkkkk kkkk kkkk kkkkkkk kkkk kkk kkk kkkk *
* kkkkkkkkk kkkkkkkkkkk kkkk kkk kkkk kkk kkk kkkk *
* kkkk kkkk kkkk kkkk kkkk kkkk kkkk kkk kkk kkkk *
* kkkk kkkk kkkk kkkk kkkk kkkk kkkk kkkkkkkkkk kkkkkkkkkk *
* kkkk kkkk kkkk kkkk kkkk kkkk kkkk kkkkkkkkkk kkkkkkkkkk *
* *
* krATos: a fREe opEN sOURce CoDE for mULti-pHysIC aDaptIVe SoLVErS, *
* aN extEnsIBLe OBjeCt oRiEnTEd SOlutION fOR fInITe ELemEnt fORmULatIONs *
* Copyleft by 2003 ciMNe *
* Copyleft by 2003 originary authors Copyleft by 2003 your name *
* This library is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Lesser General Public License as *
* published by the Free Software Foundation; either version 2.1 of *
* the License, or any later version. *
* *
* This library is distributed in the hope that it will be useful, but *
* WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU Lesser General Public License for more details. *
* *
* You should have received a copy of the GNU Lesser General Public *
* License along with this library; if not, write to International Centre *
* for Numerical Methods in Engineering (CIMNE), *
* Edifici C1 - Campus Nord UPC, Gran Capità s/n, 08034 Barcelona. *
* *
* You can also contact us to the following email address: *
* kratos@cimne.upc.es *
* or fax number: +34 93 401 65 17 *
* *
* Created at Institute for Structural Mechanics *
* Ruhr-University Bochum, Germany *
* Last modified by: $Author: rrossi $ *
* Date: $Date: 2009-01-15 11:11:35 $ *
* Revision: $Revision: 1.5 $ *
*========================================================================*
* International Center of Numerical Methods in Engineering - CIMNE *
* Barcelona - Spain *
*========================================================================*
*/
#if !defined(KRATOS_SUPERLU_ITERATIVE_SOLVER_H_INCLUDED )
#define KRATOS_SUPERLU_ITERATIVE_SOLVER_H_INCLUDED
// External includes
#include "includes/ublas_interface.h"
// #include "external_includes/superlu/superlu.hpp"
#include "SRC/slu_ddefs.h"
// Project includes
#include "includes/define.h"
#include "linear_solvers/direct_solver.h"
namespace ublas = boost::numeric::ublas;
namespace Kratos
{
extern "C"
{
void dcopy_(int *, double [], int *, double [], int *);
}
//some definitions for SuperLU iterative solver
char *GLOBAL_EQUED;
superlu_options_t *GLOBAL_OPTIONS;
double *GLOBAL_R, *GLOBAL_C;
int *GLOBAL_PERM_C, *GLOBAL_PERM_R;
SuperMatrix *GLOBAL_A, *GLOBAL_L, *GLOBAL_U;
// SuperMatrix *GLOBAL_A, *GLOBAL_A_ORIG, *GLOBAL_L, *GLOBAL_U;
SuperLUStat_t *GLOBAL_STAT;
mem_usage_t *GLOBAL_MEM_USAGE;
void dmatvec_mult(double alpha, double x[], double beta, double y[])
{
SuperMatrix *A = GLOBAL_A;
char flag[] = "N";
sp_dgemv(flag, alpha, A, x, 1, beta, y, 1);
}
void dpsolve(int n, double x[], double y[])
{
int i_1 = 1;
SuperMatrix *L = GLOBAL_L, *U = GLOBAL_U;
SuperLUStat_t *stat = GLOBAL_STAT;
int *perm_c = GLOBAL_PERM_C, *perm_r = GLOBAL_PERM_R;
int info;
static DNformat X;
static SuperMatrix XX = {SLU_DN, SLU_D, SLU_GE, 1, 1, &X};
dcopy_(&n, y, &i_1, x, &i_1);
XX.nrow = n;
X.lda = n;
X.nzval = x;
dgstrs(NOTRANS, L, U, perm_c, perm_r, &XX, stat, &info);
}
//some initial definitions
extern "C"
{
int dfgmr( int n,
void (*matvec_mult)(double, double [], double, double []),
void (*psolve)(int n, double [], double[]),
double *rhs, double *sol, double tol, int restrt, int *itmax,
FILE *fits);
}
extern "C"
{
int dfill_diag(int n, NCformat *Astore);
}
extern "C"
{
double dnrm2_(int *, double [], int *);
}
extern "C"
{
void daxpy_(int *, double *, double [], int *, double [], int *);
}
extern "C"
{
void dCompRow_to_CompCol(int , int , int , double *, int *, int *, double **, int **, int **);
}
template< class TSparseSpaceType, class TDenseSpaceType,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class SuperLUIterativeSolver : public DirectSolver< TSparseSpaceType,
TDenseSpaceType, TReordererType>
{
public:
/**
* Counted pointer of SuperLUIterativeSolver
*/
KRATOS_CLASS_POINTER_DEFINITION( SuperLUIterativeSolver );
typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> BaseType;
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
typedef typename TSparseSpaceType::VectorType VectorType;
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
SuperLUIterativeSolver(Parameters settings)
{
KRATOS_TRY
Parameters default_parameters( R"(
{
"solver_type": "SuperLUIterativeSolver",
"tolerance" : 1.0e-6,
"max_iteration" : 200,
"gmres_krylov_space_dimension": 100,
"drop_tolerance": 1e-4 ,
"fill_tolerance": 1e-2 ,
"fill_factor" : 10 ,
"scaling":false
} )" );
//now validate agains defaults -- this also ensures no type mismatch
settings.ValidateAndAssignDefaults(default_parameters);
mTol = settings["tolerance"].GetDouble();
mmax_it = settings["max_iteration"].GetInt();
mrestart = settings["gmres_krylov_space_dimension"].GetInt();
mDropTol = settings["drop_tolerance"].GetDouble();
mFillTol = settings["fill_tolerance"].GetDouble();
mFillFactor = settings["fill_factor"].GetInt();
KRATOS_CATCH("")
}
/**
* Default constructor
*/
SuperLUIterativeSolver(double NewMaxTolerance,
int NewMaxIterationsNumber,
int restart,
double DropTol=1e-4,
double FillTol = 1e-2,
double FillFactor=10)
{
mTol = NewMaxTolerance;
mmax_it = NewMaxIterationsNumber;
mrestart = restart;
mDropTol = DropTol;
mFillTol = FillTol;
mFillFactor = FillFactor;
}
SuperLUIterativeSolver()
{
mTol = 1e-6;
mrestart = 150;
mmax_it = mrestart*3;
mDropTol = 1e-4;
mFillTol = 1e-2;
mFillFactor = 10;
}
/**
* Destructor
*/
~SuperLUIterativeSolver() override {};
/**
* Normal solve method.
* Solves the linear system Ax=b and puts the result on SystemVector& rX.
* rX is also th initial guess for iterative methods.
* @param rA. System matrix
* @param rX. Solution vector.
* @param rB. Right hand side vector.
*/
bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
//void dmatvec_mult(double alpha, double x[], double beta, double y[]);
//void dpsolve(int n, double x[], double y[]);
//////////////////////////////////////////////////////////////////////////////extern int dfgmr( int n,
// void (*matvec_mult)(double, double [], double, double []),
// void (*psolve)(int n, double [], double[]),
// double *rhs, double *sol, double tol, int restrt, int *itmax,
// FILE *fits);
// extern int dfill_diag(int n, NCformat *Astore);
char equed[1] = {'B'};
//yes_no_t equil;
// trans_t trans;
// SuperMatrix A, AA, L, U;
SuperMatrix A, L, U;
SuperMatrix B, X;
//double *a_orig;
//int *asub_orig, *xa_orig;
int *etree;
int *perm_c; /* column permutation vector */
int *perm_r; /* row permutations from partial pivoting */
int /*nrhs,*/ lwork, info, m, n;
double *work = NULL;
double *R, *C;
double rpg, rcond;
double zero = 0.0;
//double one = 1.0;
mem_usage_t mem_usage;
superlu_options_t options;
SuperLUStat_t stat;
int i;
//int restrt, iter, maxit, i;
double resid;
double *x, *b;
#ifdef DEBUG
extern int num_drop_L, num_drop_U;
#endif
#if ( DEBUGlevel>=1 )
CHECK_MALLOC("Enter main()");
#endif
/* Defaults */
lwork = 0;
// nrhs = 1;
// trans = NOTRANS;
/* Set the default input options:
options.Fact = DOFACT;
options.Equil = YES;
options.ColPerm = COLAMD;
options.DiagPivotThresh = 0.1; //different from complete LU
options.Trans = NOTRANS;
options.IterRefine = NOREFINE;
options.SymmetricMode = NO;
options.PivotGrowth = NO;
options.ConditionNumber = NO;
options.PrintStat = YES;
options.RowPerm = LargeDiag;
options.ILU_DropTol = 1e-4;
options.ILU_FillTol = 1e-2;
options.ILU_FillFactor = 10.0;
options.ILU_DropRule = DROP_BASIC | DROP_AREA;
options.ILU_Norm = INF_NORM;
options.ILU_MILU = SILU;
*/
ilu_set_default_options(&options);
options.ILU_MILU = SILU; //SMILU_3;
options.PrintStat = NO;
options.Trans = NOTRANS;
// options.RowPerm = NO;
options.ILU_DropTol = mDropTol;
options.ILU_FillTol = mFillTol;
options.ILU_FillFactor = mFillFactor;
// options.SymmetricMode = YES;
// options.ILU_DropRule = DROP_DYNAMIC;
// options.ILU_Norm = ONE_NORM;
// options.IterRefine = SLU_DOUBLE;
/* Modify the defaults. */
options.PivotGrowth = YES; /* Compute reciprocal pivot growth */
options.ConditionNumber = YES;/* Compute reciprocal condition number */
if ( lwork > 0 )
{
work =(double*) SUPERLU_MALLOC(lwork);
if ( !work ) ABORT("Malloc fails for work[].");
}
//copy matrix from kratos
//Fill the SuperLU matrices
//NRformat *Astore;
//NCformat *Ustore;
//SCformat *Lstore;
m = rA.size1();
n = rA.size2();
//create a copy of the matrix
int *index1_vector = new (std::nothrow) int[rA.index1_data().size()];
int *index2_vector = new (std::nothrow) int[rA.index2_data().size()];
for ( int unsigned i = 0; i < rA.index1_data().size(); i++ )
index1_vector[i] = (int)rA.index1_data()[i];
for ( unsigned int i = 0; i < rA.index2_data().size(); i++ )
index2_vector[i] = (int)rA.index2_data()[i];
//works also with dCreate_CompCol_Matrix
// dCreate_CompCol_Matrix (&A, rA.size1(), rA.size2(),
// rA.nnz(),
// rA.value_data().begin(),
// index2_vector, //can not avoid a copy as ublas uses unsigned int internally
// index1_vector, //can not avoid a copy as ublas uses unsigned int internally
// SLU_NC, SLU_D, SLU_GE //ARGH...maybe this should be SLU_NC
// );
// SuperMatrix Akratos;
// dCreate_CompRow_Matrix (&A, rA.size1(), rA.size2(),
// rA.nnz(),
// rA.value_data().begin(),
// index2_vector, //can not avoid a copy as ublas uses unsigned int internally
// index1_vector, //can not avoid a copy as ublas uses unsigned int internally
// SLU_NR, SLU_D, SLU_GE
// );
int_t *asubt, *xat;
double *at;
dCompRow_to_CompCol(rA.size1(), rA.size2(), rA.nnz(),rA.value_data().begin(), index2_vector, index1_vector,&at, &asubt, &xat);
dCreate_CompCol_Matrix(&A, rA.size1(), rA.size2(),rA.nnz(), at, asubt, xat, SLU_NC, SLU_D, SLU_GE);
delete [] index1_vector;
delete [] index2_vector;
// double **values;;
// int **i1;
// int **i2;
// dCompRow_to_CompCol(rA.size1(), rA.size2(), rA.nnz(),
// rA.value_data().begin(), index2_vector, index1_vector,
// values, i1, i2);
//jmc
//NCformat *Astore;
//Astore = (NCformat*) A.Store;
dfill_diag(n, (NCformat*) A.Store);
//jmc
//printf("Dimension %dx%d; # nonzeros %d\n", A.nrow, A.ncol, ((NCformat*) A.Store)->nnz);
fflush(stdout);
/* Make a copy of the original matrix. */
// nnz = Astore->nnz;
// a_orig = doubleMalloc(nnz);
// asub_orig = intMalloc(nnz);
// xa_orig = intMalloc(n+1);
// for (i = 0; i < nnz; ++i)
// {
// a_orig[i] = ((double *)Astore->nzval)[i];
// asub_orig[i] = Astore->rowind[i];
// }
// for (i = 0; i <= n; ++i) xa_orig[i] = Astore->colptr[i];
// dCreate_CompCol_Matrix(&AA, m, n, nnz, a_orig, asub_orig, xa_orig,
// SLU_NR, SLU_D, SLU_GE);
/* Generate the right-hand side */
// if ( !(rhsb = doubleMalloc(m * nrhs)) ) ABORT("Malloc fails for rhsb[].");
// if ( !(rhsx = doubleMalloc(m * nrhs)) ) ABORT("Malloc fails for rhsx[].");
dCreate_Dense_Matrix (&B, rB.size(), 1,&rB[0],rB.size(),SLU_DN, SLU_D, SLU_GE);
dCreate_Dense_Matrix (&X, rX.size(), 1,&rX[0],rX.size(),SLU_DN, SLU_D, SLU_GE);
//dCreate_Dense_Matrix(&B, m, nrhs, rhsb, m, SLU_DN, SLU_D, SLU_GE);
//dCreate_Dense_Matrix(&X, m, nrhs, rhsx, m, SLU_DN, SLU_D, SLU_GE);
//xact = doubleMalloc(n * nrhs);
//ldx = n;
//dGenXtrue(n, nrhs, xact, ldx);
//dFillRHS(trans, nrhs, xact, ldx, &A, &B);
if ( !(etree = intMalloc(n)) ) ABORT("Malloc fails for etree[].");
if ( !(perm_r = intMalloc(m)) ) ABORT("Malloc fails for perm_r[].");
if ( !(perm_c = intMalloc(n)) ) ABORT("Malloc fails for perm_c[].");
if ( !(R = (double *) SUPERLU_MALLOC(A.nrow * sizeof(double))) )
ABORT("SUPERLU_MALLOC fails for R[].");
if ( !(C = (double *) SUPERLU_MALLOC(A.ncol * sizeof(double))) )
ABORT("SUPERLU_MALLOC fails for C[].");
info = 0;
#ifdef DEBUG
num_drop_L = 0;
num_drop_U = 0;
#endif
/* Initialize the statistics variables. */
StatInit(&stat);
/* Compute the incomplete factorization and compute the condition number
and pivot growth using dgsisx. */
B.ncol = 0; /* not to perform triangular solution */
dgsisx(&options, &A, perm_c, perm_r, etree, equed, R, C, &L, &U, work,
lwork, &B, &X, &rpg, &rcond, &mem_usage, &stat, &info);
/* Set RHS for GMRES. */
if (!(b = doubleMalloc(m))) ABORT("Malloc fails for b[].");
for (i = 0; i < m; i++) b[i] = rB[i];
//jmc
//printf("dgsisx(): info %d, equed %c\n", info, equed[0]);
//if (info > 0 || rcond < 1e-8 || rpg > 1e8)
// printf("WARNING: This preconditioner might be unstable.\n");
//if ( info == 0 || info == n+1 )
//{
// if ( options.PivotGrowth == YES )
// printf("Recip. pivot growth = %e\n", rpg);
// if ( options.ConditionNumber == YES )
// printf("Recip. condition number = %e\n", rcond);
//}
//else if ( info > 0 && lwork == -1 )
//{
// printf("** Estimated memory: %d bytes\n", info - n);
//}
//jmc
//NCformat *Ustore;
//SCformat *Lstore;
//Lstore = (SCformat *) L.Store;
//Ustore = (NCformat *) U.Store;
//printf("n(A) = %d, nnz(A) = %d\n", n, Astore->nnz);
//printf("No of nonzeros in factor L = %d\n", Lstore->nnz);
//printf("No of nonzeros in factor U = %d\n", Ustore->nnz);
//printf("No of nonzeros in L+U = %d\n", Lstore->nnz + Ustore->nnz - n);
//printf("Fill ratio: nnz(F)/nnz(A) = %.3f\n",
// ((double)(Lstore->nnz) + (double)(Ustore->nnz) - (double)n)
// / (double)Astore->nnz);
//printf("L\\U MB %.3f\ttotal MB needed %.3f\n",
// mem_usage.for_lu/1e6, mem_usage.total_needed/1e6);
//fflush(stdout);
/* Set the global variables. */
GLOBAL_A = &A;
// GLOBAL_A_ORIG = &AA;
GLOBAL_L = &L;
GLOBAL_U = &U;
GLOBAL_STAT = &stat;
GLOBAL_PERM_C = perm_c;
GLOBAL_PERM_R = perm_r;
GLOBAL_OPTIONS = &options;
GLOBAL_EQUED = equed;
GLOBAL_R = R;
GLOBAL_C = C;
GLOBAL_MEM_USAGE = &mem_usage;
/* Set the options to do solve-only. */
options.Fact = FACTORED;
options.PivotGrowth = NO;
options.ConditionNumber = NO;
/* Set the variables used by GMRES. */
//restrt = SUPERLU_MIN(n / 3 + 1, 150);
int restrt = SUPERLU_MIN(n / 3 + 1, mrestart);
//jmc
//int maxit = mmax_it; //1000;
int iter = mmax_it;
resid = mTol; // 1e-8;
if (!(x = doubleMalloc(n))) ABORT("Malloc fails for x[].");
if (info <= n + 1)
{
int i_1 = 1;
double nrmB, res, t;
//extern double dnrm2_(int *, double [], int *);
//extern void daxpy_(int *, double *, double [], int *, double [], int *);
/* Initial guess */
for (i = 0; i < n; i++) x[i] = zero;
t = SuperLU_timer_();
/* Call GMRES */
//dfgmr(n, dmatvec_mult, dpsolve, b, x, resid, restrt, &iter, stdout);
dfgmr(n, dmatvec_mult, dpsolve, b, x, resid, restrt, &iter, NULL);
/* Scale the solution back if equilibration was performed. */
if (*equed == 'C' || *equed == 'B')
{
#pragma omp parallel for
for (int i = 0; i < n; i++) x[i] *= C[i];
}
t = SuperLU_timer_() - t;
/* Output the result. */
//jmc
//double nrmA;
//nrmA = dnrm2_(&(Astore->nnz), (double *)((DNformat *)A.Store)->nzval,
// &i_1);
nrmB = dnrm2_(&m, b, &i_1);
sp_dgemv( ( char *)"N", -1.0, &A, x, 1, 1.0, b, 1);
res = dnrm2_(&m, b, &i_1);
resid = res / nrmB;
//jmc
//printf("||A||_F = %.1e, ||B||_2 = %.1e, ||B-A*X||_2 = %.1e, "
// "relres = %.1e\n", nrmA, nrmB, res, resid);
//if (iter >= maxit)
//{
// if (resid >= 1.0){
// iter = -180;
// std::cout << "final residual is VERY VERY LARGE" << std::endl;
//}
// else if (resid > 1e-8) iter = -111;
//}
//printf("iteration: %d\nresidual: %.1e\nGMRES time: %.2f seconds.\n",
// iter, resid, t);
//for (i = 0; i < m; i++) {
// maxferr = SUPERLU_MAX(maxferr, fabs(x[i] - xact[i]));
//}
//printf("||X-X_true||_oo = %.1e\n", maxferr);
}
fflush(stdout);
#pragma omp parallel for
for (int i = 0; i < n; i++) rX[i] = x[i];
if ( options.PrintStat ) StatPrint(&stat);
StatFree(&stat);
//SUPERLU_FREE (rhsb);
//SUPERLU_FREE (rhsx);
//SUPERLU_FREE (xact);
SUPERLU_FREE (etree);
SUPERLU_FREE (perm_r);
SUPERLU_FREE (perm_c);
SUPERLU_FREE (R);
SUPERLU_FREE (C);
Destroy_SuperMatrix_Store(&A);
// Destroy_CompCol_Matrix(&AA);
Destroy_SuperMatrix_Store(&B);
Destroy_SuperMatrix_Store(&X);
if ( lwork >= 0 )
{
Destroy_SuperNode_Matrix(&L);
Destroy_CompCol_Matrix(&U);
}
SUPERLU_FREE(b);
SUPERLU_FREE(x);
SUPERLU_FREE(asubt);
SUPERLU_FREE(xat);
SUPERLU_FREE(at);
#if ( DEBUGlevel>=1 )
CHECK_MALLOC("Exit main()");
#endif
return true;
}
/**
* Multi solve method for solving a set of linear systems with same coefficient matrix.
* Solves the linear system Ax=b and puts the result on SystemVector& rX.
* rX is also th initial guess for iterative methods.
* @param rA. System matrix
* @param rX. Solution vector.
* @param rB. Right hand side vector.
*/
bool Solve(SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB) override
{
/**
* TODO:
* translate SparseMatrixType into SuperMatrix
* call solving routine from SuperLU
*/
// slu::gssv ( rA, rB, slu::atpla_min_degree);
// std::cout<<"Matrix Test:"<<std::endl;
// std::cout<<"boost matrix:"<<std::endl;
// KRATOS_WATCH( rA );
// const int size1 = TDenseSpaceType::Size1(rX);
// const int size2 = TDenseSpaceType::Size2(rX);
bool is_solved = true;
// VectorType x(size1);
// VectorType b(size1);
// define an object to store skyline matrix and factorization
// LUSkylineFactorization<TSparseSpaceType, TDenseSpaceType> myFactorization;
// copy myMatrix into skyline format
// myFactorization.copyFromCSRMatrix(rA);
// factorize it
// myFactorization.factorize();
// for(int i = 0 ; i < size2 ; i++)
// {
// TDenseSpaceType::GetColumn(i,rX, x);
// TDenseSpaceType::GetColumn(i,rB, b);
// and back solve
// myFactorization.backForwardSolve(size1, b, x);
// TDenseSpaceType::SetColumn(i,rX, x);
// TDenseSpaceType::SetColumn(i,rB, b);
// }
return is_solved;
}
/**
* Print information about this object.
*/
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "SuperLU solver finished.";
}
/**
* Print object's data.
*/
void PrintData(std::ostream& rOStream) const override
{
}
private:
double mTol;
int mmax_it;
int mrestart;
double mDropTol;
double mFillTol;
double mFillFactor;
/**
* Assignment operator.
*/
SuperLUIterativeSolver& operator=(const SuperLUIterativeSolver& Other);
/**
* Copy constructor.
*/
SuperLUIterativeSolver(const SuperLUIterativeSolver& Other);
}; // Class SkylineLUFactorizationSolver
/**
* input stream function
*/
template<class TSparseSpaceType, class TDenseSpaceType,class TReordererType>
inline std::istream& operator >> (std::istream& rIStream, SuperLUIterativeSolver< TSparseSpaceType,
TDenseSpaceType, TReordererType>& rThis)
{
return rIStream;
}
/**
* output stream function
*/
template<class TSparseSpaceType, class TDenseSpaceType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
const SuperLUIterativeSolver<TSparseSpaceType,
TDenseSpaceType, TReordererType>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
} // namespace Kratos.
#endif // KRATOS_SUPERLU_ITERATIVE_SOLVER_H_INCLUDED defined
|
huffman.c | /*
* huffman - Encode/Decode files using Huffman encoding.
* http://huffman.sourceforge.net
* Copyright (C) 2003 Douglas Ryan Richardson
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "huffman.h"
#ifdef WIN32
#include <winsock2.h>
#include <malloc.h>
#define alloca _alloca
#else
#include <netinet/in.h>
#endif
#define CORES 4
typedef struct huffman_node_tag
{
unsigned char isLeaf;
unsigned long count;
struct huffman_node_tag *parent;
union
{
struct
{
struct huffman_node_tag *zero, *one;
};
unsigned char symbol;
};
} huffman_node;
typedef struct huffman_code_tag
{
/* The length of this code in bits. */
unsigned long numbits;
/* The bits that make up this code. The first
bit is at position 0 in bits[0]. The second
bit is at position 1 in bits[0]. The eighth
bit is at position 7 in bits[0]. The ninth
bit is at position 0 in bits[1]. */
unsigned char *bits;
} huffman_code;
static unsigned long
numbytes_from_numbits(unsigned long numbits)
{
return numbits / 8 + (numbits % 8 ? 1 : 0);
}
/*
* get_bit returns the ith bit in the bits array
* in the 0th position of the return value.
*/
static unsigned char
get_bit(unsigned char* bits, unsigned long i)
{
return (bits[i / 8] >> i % 8) & 1;
}
static void
reverse_bits(unsigned char* bits, unsigned long numbits)
{
unsigned long numbytes = numbytes_from_numbits(numbits);
unsigned char *tmp =
(unsigned char*)alloca(numbytes);
unsigned long curbit;
long curbyte = 0;
memset(tmp, 0, numbytes);
for(curbit = 0; curbit < numbits; ++curbit)
{
unsigned int bitpos = curbit % 8;
if(curbit > 0 && curbit % 8 == 0)
++curbyte;
tmp[curbyte] |= (get_bit(bits, numbits - curbit - 1) << bitpos);
}
memcpy(bits, tmp, numbytes);
}
/*
* new_code builds a huffman_code from a leaf in
* a Huffman tree.
*/
static huffman_code*
new_code(const huffman_node* leaf)
{
/* Build the huffman code by walking up to
* the root node and then reversing the bits,
* since the Huffman code is calculated by
* walking down the tree. */
unsigned long numbits = 0;
unsigned char* bits = NULL;
huffman_code *p;
while(leaf && leaf->parent)
{
huffman_node *parent = leaf->parent;
unsigned char cur_bit = (unsigned char)(numbits % 8);
unsigned long cur_byte = numbits / 8;
/* If we need another byte to hold the code,
then allocate it. */
if(cur_bit == 0)
{
size_t newSize = cur_byte + 1;
bits = (unsigned char*)realloc(bits, newSize);
bits[newSize - 1] = 0; /* Initialize the new byte. */
}
/* If a one must be added then or it in. If a zero
* must be added then do nothing, since the byte
* was initialized to zero. */
if(leaf == parent->one)
bits[cur_byte] |= 1 << cur_bit;
++numbits;
leaf = parent;
}
if(bits)
reverse_bits(bits, numbits);
p = (huffman_code*)malloc(sizeof(huffman_code));
p->numbits = numbits;
p->bits = bits;
return p;
}
#define MAX_SYMBOLS 256
typedef huffman_node* SymbolFrequencies[MAX_SYMBOLS];
typedef huffman_code* SymbolEncoder[MAX_SYMBOLS];
static huffman_node*
new_leaf_node(unsigned char symbol)
{
huffman_node *p = (huffman_node*)malloc(sizeof(huffman_node));
p->isLeaf = 1;
p->symbol = symbol;
p->count = 0;
p->parent = 0;
return p;
}
static huffman_node*
new_nonleaf_node(unsigned long count, huffman_node *zero, huffman_node *one)
{
huffman_node *p = (huffman_node*)malloc(sizeof(huffman_node));
p->isLeaf = 0;
p->count = count;
p->zero = zero;
p->one = one;
p->parent = 0;
return p;
}
static void
free_huffman_tree(huffman_node *subtree)
{
if(subtree == NULL)
return;
if(!subtree->isLeaf)
{
free_huffman_tree(subtree->zero);
free_huffman_tree(subtree->one);
}
free(subtree);
}
static void
free_code(huffman_code* p)
{
free(p->bits);
free(p);
}
static void
free_encoder(SymbolEncoder *pSE)
{
unsigned long i;
for(i = 0; i < MAX_SYMBOLS; ++i)
{
huffman_code *p = (*pSE)[i];
if(p)
free_code(p);
}
free(pSE);
}
static void
init_frequencies(SymbolFrequencies *pSF)
{
memset(*pSF, 0, sizeof(SymbolFrequencies));
}
typedef struct buf_cache_tag
{
unsigned char *cache;
unsigned int cache_len;
unsigned int cache_cur;
unsigned char **pbufout;
unsigned int *pbufoutlen;
} buf_cache;
static int init_cache(buf_cache* pc,
unsigned int cache_size,
unsigned char **pbufout,
unsigned int *pbufoutlen)
{
assert(pc && pbufout && pbufoutlen);
if(!pbufout || !pbufoutlen)
return 1;
pc->cache = (unsigned char*)malloc(cache_size);
pc->cache_len = cache_size;
pc->cache_cur = 0;
pc->pbufout = pbufout;
*pbufout = NULL;
pc->pbufoutlen = pbufoutlen;
*pbufoutlen = 0;
return pc->cache ? 0 : 1;
}
static void free_cache(buf_cache* pc)
{
assert(pc);
if(pc->cache)
{
free(pc->cache);
pc->cache = NULL;
}
}
static int flush_cache(buf_cache* pc)
{
assert(pc);
if(pc->cache_cur > 0)
{
unsigned int newlen = pc->cache_cur + *pc->pbufoutlen;
unsigned char* tmp = realloc(*pc->pbufout, newlen);
if(!tmp)
return 1;
memcpy(tmp + *pc->pbufoutlen, pc->cache, pc->cache_cur);
*pc->pbufout = tmp;
*pc->pbufoutlen = newlen;
pc->cache_cur = 0;
}
return 0;
}
static int write_cache(buf_cache* pc,
const void *to_write,
unsigned int to_write_len)
{
unsigned char* tmp;
assert(pc && to_write);
assert(pc->cache_len >= pc->cache_cur);
/* If trying to write more than the cache will hold
* flush the cache and allocate enough space immediately,
* that is, don't use the cache. */
if(to_write_len > pc->cache_len - pc->cache_cur)
{
unsigned int newlen;
flush_cache(pc);
newlen = *pc->pbufoutlen + to_write_len;
tmp = realloc(*pc->pbufout, newlen);
if(!tmp)
return 1;
memcpy(tmp + *pc->pbufoutlen, to_write, to_write_len);
*pc->pbufout = tmp;
*pc->pbufoutlen = newlen;
}
else
{
/* Write the data to the cache. */
memcpy(pc->cache + pc->cache_cur, to_write, to_write_len);
pc->cache_cur += to_write_len;
}
return 0;
}
static unsigned int
get_symbol_frequencies_from_memory(SymbolFrequencies *pSF,
const unsigned char *bufin,
unsigned int bufinlen)
{
unsigned int i;
/* Set all frequencies to 0. */
init_frequencies(pSF);
/* Count the frequency of each symbol in the input file. */
for(i = 0; i < bufinlen; ++i)
{
unsigned char uc = bufin[i];
if(!(*pSF)[uc])
(*pSF)[uc] = new_leaf_node(uc);
++(*pSF)[uc]->count;
}
return bufinlen;
}
/*
* When used by qsort, SFComp sorts the array so that
* the symbol with the lowest frequency is first. Any
* NULL entries will be sorted to the end of the list.
*/
static int
SFComp(const void *p1, const void *p2)
{
const huffman_node *hn1 = *(const huffman_node**)p1;
const huffman_node *hn2 = *(const huffman_node**)p2;
/* Sort all NULLs to the end. */
if(hn1 == NULL && hn2 == NULL)
return 0;
if(hn1 == NULL)
return 1;
if(hn2 == NULL)
return -1;
if(hn1->count > hn2->count)
return 1;
else if(hn1->count < hn2->count)
return -1;
return 0;
}
/*
* build_symbol_encoder builds a SymbolEncoder by walking
* down to the leaves of the Huffman tree and then,
* for each leaf, determines its code.
*/
static void
build_symbol_encoder(huffman_node *subtree, SymbolEncoder *pSF)
{
if(subtree == NULL)
return;
if(subtree->isLeaf)
(*pSF)[subtree->symbol] = new_code(subtree);
else
{
build_symbol_encoder(subtree->zero, pSF);
build_symbol_encoder(subtree->one, pSF);
}
}
/*
* calculate_huffman_codes turns pSF into an array
* with a single entry that is the root of the
* huffman tree. The return value is a SymbolEncoder,
* which is an array of huffman codes index by symbol value.
*/
static SymbolEncoder*
calculate_huffman_codes(SymbolFrequencies * pSF)
{
unsigned int i = 0;
unsigned int n = 0;
huffman_node *m1 = NULL, *m2 = NULL;
SymbolEncoder *pSE = NULL;
/* Sort the symbol frequency array by ascending frequency. */
qsort((*pSF), MAX_SYMBOLS, sizeof((*pSF)[0]), SFComp);
/* Get the number of symbols. */
for(n = 0; n < MAX_SYMBOLS && (*pSF)[n]; ++n)
;
/*
* Construct a Huffman tree. This code is based
* on the algorithm given in Managing Gigabytes
* by Ian Witten et al, 2nd edition, page 34.
* Note that this implementation uses a simple
* count instead of probability.
*/
for(i = 0; i < n - 1; ++i)
{
/* Set m1 and m2 to the two subsets of least probability. */
m1 = (*pSF)[0];
m2 = (*pSF)[1];
/* Replace m1 and m2 with a set {m1, m2} whose probability
* is the sum of that of m1 and m2. */
(*pSF)[0] = m1->parent = m2->parent =
new_nonleaf_node(m1->count + m2->count, m1, m2);
(*pSF)[1] = NULL;
/* Put newSet into the correct count position in pSF. */
qsort((*pSF), n, sizeof((*pSF)[0]), SFComp);
}
/* Build the SymbolEncoder array from the tree. */
pSE = (SymbolEncoder*)malloc(sizeof(SymbolEncoder));
memset(pSE, 0, sizeof(SymbolEncoder));
build_symbol_encoder((*pSF)[0], pSE);
return pSE;
}
/*
* Allocates memory and sets *pbufout to point to it. The memory
* contains the code table.
*/
static int
write_code_table_to_memory(buf_cache *pc,
SymbolEncoder *se,
uint32_t symbol_count)
{
uint32_t i, count = 0;
/* Determine the number of entries in se. */
for(i = 0; i < MAX_SYMBOLS; ++i)
{
if((*se)[i])
++count;
}
/* Write the number of entries in network byte order. */
i = htonl(count);
if(write_cache(pc, &i, sizeof(i)))
return 1;
/* Write the number of bytes that will be encoded. */
symbol_count = htonl(symbol_count);
if(write_cache(pc, &symbol_count, sizeof(symbol_count)))
return 1;
/* Write the entries. */
for(i = 0; i < MAX_SYMBOLS; ++i)
{
huffman_code *p = (*se)[i];
if(p)
{
unsigned int numbytes;
/* The value of i is < MAX_SYMBOLS (256), so it can
be stored in an unsigned char. */
unsigned char uc = (unsigned char)i;
/* Write the 1 byte symbol. */
if(write_cache(pc, &uc, sizeof(uc)))
return 1;
/* Write the 1 byte code bit length. */
uc = (unsigned char)p->numbits;
if(write_cache(pc, &uc, sizeof(uc)))
return 1;
/* Write the code bytes. */
numbytes = numbytes_from_numbits(p->numbits);
if(write_cache(pc, p->bits, numbytes))
return 1;
}
}
return 0;
}
static int
memread(const unsigned char* buf,
unsigned int buflen,
unsigned int *pindex,
void* bufout,
unsigned int readlen)
{
assert(buf && pindex && bufout);
assert(buflen >= *pindex);
if(buflen < *pindex)
return 1;
if(readlen + *pindex >= buflen)
return 1;
memcpy(bufout, buf + *pindex, readlen);
*pindex += readlen;
return 0;
}
static huffman_node*
read_code_table_from_memory(const unsigned char* bufin,
unsigned int bufinlen,
unsigned int *pindex,
uint32_t *pDataBytes)
{
huffman_node *root = new_nonleaf_node(0, NULL, NULL);
uint32_t count;
/* Read the number of entries.
(it is stored in network byte order). */
if(memread(bufin, bufinlen, pindex, &count, sizeof(count)))
{
free_huffman_tree(root);
return NULL;
}
count = ntohl(count);
/* Read the number of data bytes this encoding represents. */
if(memread(bufin, bufinlen, pindex, pDataBytes, sizeof(*pDataBytes)))
{
free_huffman_tree(root);
return NULL;
}
*pDataBytes = ntohl(*pDataBytes);
/* Read the entries. */
while(count-- > 0)
{
unsigned int curbit;
unsigned char symbol;
unsigned char numbits;
unsigned char numbytes;
unsigned char *bytes;
huffman_node *p = root;
if(memread(bufin, bufinlen, pindex, &symbol, sizeof(symbol)))
{
free_huffman_tree(root);
return NULL;
}
if(memread(bufin, bufinlen, pindex, &numbits, sizeof(numbits)))
{
free_huffman_tree(root);
return NULL;
}
numbytes = (unsigned char)numbytes_from_numbits(numbits);
bytes = (unsigned char*)malloc(numbytes);
if(memread(bufin, bufinlen, pindex, bytes, numbytes))
{
free(bytes);
free_huffman_tree(root);
return NULL;
}
/*
* Add the entry to the Huffman tree. The value
* of the current bit is used switch between
* zero and one child nodes in the tree. New nodes
* are added as needed in the tree.
*/
for(curbit = 0; curbit < numbits; ++curbit)
{
if(get_bit(bytes, curbit))
{
if(p->one == NULL)
{
p->one = curbit == (unsigned char)(numbits - 1)
? new_leaf_node(symbol)
: new_nonleaf_node(0, NULL, NULL);
p->one->parent = p;
}
p = p->one;
}
else
{
if(p->zero == NULL)
{
p->zero = curbit == (unsigned char)(numbits - 1)
? new_leaf_node(symbol)
: new_nonleaf_node(0, NULL, NULL);
p->zero->parent = p;
}
p = p->zero;
}
}
free(bytes);
}
return root;
}
static int
do_memory_encode(buf_cache *pc,
const unsigned char* bufin,
unsigned int bufinlen,
SymbolEncoder *se)
{
unsigned char curbyte = 0;
unsigned char curbit = 0;
unsigned int i;
for(i = 0; i < bufinlen; ++i)
{
unsigned char uc = bufin[i];
huffman_code *code = (*se)[uc];
unsigned long i;
for(i = 0; i < code->numbits; ++i)
{
/* Add the current bit to curbyte. */
curbyte |= get_bit(code->bits, i) << curbit;
/* If this byte is filled up then write it
* out and reset the curbit and curbyte. */
if(++curbit == 8)
{
if(write_cache(pc, &curbyte, sizeof(curbyte)))
return 1;
curbyte = 0;
curbit = 0;
}
}
}
/*
* If there is data in curbyte that has not been
* output yet, which means that the last encoded
* character did not fall on a byte boundary,
* then output it.
*/
curbit > 0 ? write_cache(pc, &curbyte, sizeof(curbyte)) : 0;
return (8 - curbit) % 8;
}
unsigned int merge_buffers(unsigned char **output,
unsigned char **bufout_piece,
unsigned int *bufout_piece_len,
unsigned int *zeros)
{
int i;
unsigned int size = 0;
unsigned int cur_len = 0;
unsigned char sel_mask[9];
/**
* Little Endian
sel_mask[0] = 0x00; sel_mask[1] = 0x80;
sel_mask[2] = 0xc0; sel_mask[3] = 0xe0;
sel_mask[4] = 0xf0; sel_mask[5] = 0xf8;
sel_mask[6] = 0xfc; sel_mask[7] = 0xfe;
sel_mask[8] = 0xff;
*/
// Big Endian
sel_mask[0] = 0x00; sel_mask[1] = 0x01;
sel_mask[2] = 0x03; sel_mask[3] = 0x07;
sel_mask[4] = 0x0f; sel_mask[5] = 0x1f;
sel_mask[6] = 0x3f; sel_mask[7] = 0x7f;
sel_mask[8] = 0xff;
for (i = 0; i < CORES; ++i) {
size += bufout_piece_len[i];
}
*output = malloc(size * sizeof(unsigned char));
memcpy(*output, bufout_piece[0], bufout_piece_len[0]);
cur_len += bufout_piece_len[0];
unsigned char bits;
unsigned int padding;
int kk;
for (kk = 1; kk < CORES; ++kk) {
if (zeros[kk - 1] != 0) {
bits = sel_mask[zeros[kk-1]] & bufout_piece[kk][0];
(*output)[cur_len - 1] |= (bits << (8 - zeros[kk - 1]));
for (i = 0; i < bufout_piece_len[kk]; ++i) {
if (i == (bufout_piece_len[kk] - 1)) {
if (zeros[kk - 1] + zeros[kk] >= 8) {
bufout_piece_len[kk]--;
zeros[kk] = zeros[kk - 1] + zeros[kk] - 8;
} else {
bufout_piece[kk][i] = bufout_piece[kk][i] >> zeros[kk - 1];
zeros[kk] += zeros[kk - 1];
}
} else {
padding = zeros[kk - 1];
bufout_piece[kk][i] >>= padding;
bits = sel_mask[padding] & bufout_piece[kk][i + 1];
bufout_piece[kk][i] |= (bits << (8 - padding));
}
}
}
memcpy(*output + cur_len, bufout_piece[kk], bufout_piece_len[kk]);
cur_len += bufout_piece_len[kk];
}
return cur_len;
}
#define CACHE_SIZE 1024
int huffman_encode_memory(const unsigned char *bufin,
unsigned int bufinlen,
unsigned char **pbufout,
unsigned int *pbufoutlen)
{
SymbolFrequencies sf;
SymbolEncoder *se;
huffman_node *root = NULL;
int rc, i;
unsigned int symbol_count;
buf_cache cache;
buf_cache cache_tid[CORES];
unsigned char* _bufout[CORES];
unsigned int _bufoutlen[CORES];
unsigned int remains[CORES];
/* Ensure the arguments are valid. */
if(!pbufout || !pbufoutlen)
return 1;
if(init_cache(&cache, CACHE_SIZE, pbufout, pbufoutlen))
return 1;
/**
* Init cache for every thread
*/
#pragma omp parallel for num_threads(CORES)
for (i = 0; i < CORES; ++i) {
_bufout[i] = NULL;
_bufoutlen[i] = 0;
init_cache(&cache_tid[i], CACHE_SIZE, &_bufout[i], &_bufoutlen[i]);
}
/* Get the frequency of each symbol in the input memory. */
symbol_count = get_symbol_frequencies_from_memory(&sf, bufin, bufinlen);
/* Build an optimal table from the symbolCount. */
se = calculate_huffman_codes(&sf);
root = sf[0];
/* Scan the memory again and, using the table
previously built, encode it into the output memory. */
rc = write_code_table_to_memory(&cache, se, symbol_count);
flush_cache(&cache);
if(rc == 0) {
#pragma omp parallel for num_threads(CORES)
for (i = 0; i < CORES; ++i) {
remains[i] = do_memory_encode(&cache_tid[i], bufin + (i * bufinlen / CORES), bufinlen / CORES, se);
flush_cache(&cache_tid[i]);
}
}
unsigned int tmp_size = *pbufoutlen;
for (i = 0; i < CORES; ++i) {
tmp_size += _bufoutlen[i];
}
unsigned char *tmp = realloc(*pbufout, tmp_size * sizeof(char));
unsigned char *aux;
unsigned int res = merge_buffers(&aux, _bufout, _bufoutlen, remains);
memcpy(tmp + *pbufoutlen, aux, res);
*pbufout = tmp;
*pbufoutlen += res;
/* Free the Huffman tree. */
free_huffman_tree(root);
free_encoder(se);
free_cache(&cache);
return 0;
}
int huffman_decode_memory(const unsigned char *bufin,
unsigned int bufinlen,
unsigned char **pbufout,
unsigned int *pbufoutlen)
{
huffman_node *root, *p;
unsigned int data_count;
unsigned int i = 0;
unsigned char *buf;
unsigned int bufcur = 0;
/* Ensure the arguments are valid. */
if(!pbufout || !pbufoutlen) {
return 1;
}
/* Read the Huffman code table. */
root = read_code_table_from_memory(bufin, bufinlen, &i, &data_count);
if(!root) {
return 1;
}
buf = (unsigned char*)malloc(data_count);
/* Decode the memory. */
p = root;
for(; i < bufinlen && data_count > 0; ++i)
{
unsigned char byte = bufin[i];
unsigned char mask = 1;
while(data_count > 0 && mask)
{
p = byte & mask ? p->one : p->zero;
mask <<= 1;
if(p->isLeaf)
{
buf[bufcur++] = p->symbol;
p = root;
--data_count;
}
}
}
free_huffman_tree(root);
*pbufout = buf;
*pbufoutlen = bufcur;
return 0;
}
|
residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/variable_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "utilities/builtin_timer.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @tparam TSparseSpace The sparse system considered
* @tparam TDenseSpace The dense system considered
* @tparam TLinearSolver The linear solver considered
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Definition of the flags
KRATOS_DEFINE_LOCAL_FLAG( SILENT_WARNINGS );
// Scaling enum
enum class SCALING_DIAGONAL {NO_SCALING = 0, CONSIDER_NORM_DIAGONAL = 1, CONSIDER_MAX_DIAGONAL = 2, CONSIDER_PRESCRIBED_DIAGONAL = 3};
/// Definition of the pointer
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedBlockBuilderAndSolver() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedBlockBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor.
*/
explicit ResidualBasedBlockBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
}
/** Destructor.
*/
~ResidualBasedBlockBuilderAndSolver() override
{
}
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) const override
{
return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
const auto timer = BuiltinTimer();
#pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = rModelPart.ElementsBegin();
const auto it_cond_begin = rModelPart.ConditionsBegin();
// Contributions to the system
LocalSystemMatrixType lhs_contribution(0, 0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType equation_id;
// Assemble all elements
const auto timer = BuiltinTimer();
#pragma omp parallel firstprivate(nelements, nconditions, lhs_contribution, equation_id )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_elem, lhs_contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, lhs_contribution, equation_id);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active)
{
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_cond, lhs_contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, lhs_contribution, equation_id);
}
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time LHS: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished parallel building LHS" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
KRATOS_TRY
TSystemVectorType tmp(A.size1(), 0.0);
this->Build(pScheme, rModelPart, A, tmp);
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
if(mT.size1() != 0) //if there are master-slave constraints
{
//recover solution of the original problem
TSystemVectorType Dxmodified = Dx;
TSparseSpace::Mult(mT, Dxmodified, Dx);
}
//prints informations about the current time
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
* @param rModelPart The model part of the problem to solve
*/
virtual void SystemSolveWithPhysics(
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
ModelPart& rModelPart
)
{
if(rModelPart.MasterSlaveConstraints().size() != 0) {
TSystemVectorType Dxmodified(rb.size());
InternalSystemSolveWithPhysics(rA, Dxmodified, rb, rModelPart);
//recover solution of the original problem
TSparseSpace::Mult(mT, Dxmodified, rDx);
} else {
InternalSystemSolveWithPhysics(rA, rDx, rb, rModelPart);
}
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void InternalSystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00) {
//provide physical data as needed
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
} else {
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("ResidualBasedBlockBuilderAndSolver", mOptions.IsNot(SILENT_WARNINGS)) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints informations about the current time
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
Timer::Start("Build");
Build(pScheme, rModelPart, A, b);
Timer::Stop("Build");
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyConstraints");
ApplyConstraints(pScheme, rModelPart, A, b);
Timer::Stop("ApplyConstraints");
}
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(A, Dx, b, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time Linearizing with the database at the old iteration
* @details It is ideally the fastest and safer function to use when it is possible to solve just after building
* @param pScheme The pointer to the integration scheme
* @param rModelPart The model part to compute
* @param rA The LHS matrix of the system of equations
* @param rDx The vector of unkowns
* @param rb The RHS vector of the system of equations
* @param MoveMesh tells if the update of the scheme needs to be performed when calling the Update of the scheme
*/
void BuildAndSolveLinearizedOnPreviousIteration(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
const bool MoveMesh
) override
{
KRATOS_INFO_IF("BlockBuilderAndSolver", this->GetEchoLevel() > 0)
<< "Linearizing on Old iteration" << std::endl;
KRATOS_ERROR_IF(rModelPart.GetBufferSize() == 1) << "BlockBuilderAndSolver: \n"
<< "The buffer size needs to be at least 2 in order to use \n"
<< "BuildAndSolveLinearizedOnPreviousIteration \n"
<< "current buffer size for modelpart: " << rModelPart.Name() << std::endl
<< "is :" << rModelPart.GetBufferSize()
<< " Please set IN THE STRATEGY SETTINGS "
<< " UseOldStiffnessInFirstIteration=false " << std::endl;
DofsArrayType fixed_dofs;
for(auto& r_dof : BaseType::mDofSet){
if(r_dof.IsFixed()){
fixed_dofs.push_back(&r_dof);
r_dof.FreeDof();
}
}
//TODO: Here we need to take the vector from other ones because
// We cannot create a trilinos vector without a communicator. To be improved!
TSystemVectorType dx_prediction(rDx);
TSystemVectorType rhs_addition(rb); //we know it is zero here, so we do not need to set it
// Here we bring back the database to before the prediction,
// but we store the prediction increment in dx_prediction.
// The goal is that the stiffness is computed with the
// converged configuration at the end of the previous step.
const auto it_dof_begin = BaseType::mDofSet.begin();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) {
auto it_dof = it_dof_begin + i;
//NOTE: this is initialzed to - the value of dx prediction
dx_prediction[it_dof->EquationId()] = -(it_dof->GetSolutionStepValue() - it_dof->GetSolutionStepValue(1));
}
// Use UpdateDatabase to bring back the solution to how it was at the end of the previous step
pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
if (MoveMesh) {
VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
}
this->Build(pScheme, rModelPart, rA, rb);
// Put back the prediction into the database
TSparseSpace::InplaceMult(dx_prediction, -1.0); //change sign to dx_prediction
TSparseSpace::UnaliasedAdd(rDx, 1.0, dx_prediction);
// Use UpdateDatabase to bring back the solution
// to where it was taking into account BCs
// it is done here so that constraints are correctly taken into account right after
pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
if (MoveMesh) {
VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
}
// Apply rb -= A*dx_prediction
TSparseSpace::Mult(rA, dx_prediction, rhs_addition);
TSparseSpace::UnaliasedAdd(rb, -1.0, rhs_addition);
for(auto& dof : fixed_dofs)
dof.FixDof();
if (!rModelPart.MasterSlaveConstraints().empty()) {
this->ApplyConstraints(pScheme, rModelPart, rA, rb);
}
this->ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
this->SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
}
/**
* @brief Corresponds to the previews, but the System's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
BuildRHS(pScheme, rModelPart, rb);
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyRHSConstraints");
ApplyRHSConstraints(pScheme, rModelPart, rb);
Timer::Stop("ApplyRHSConstraints");
}
ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b) override
{
KRATOS_TRY
Timer::Start("BuildRHS");
BuildRHSNoDirichlet(pScheme,rModelPart,b);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++)
{
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
const std::size_t i = dof_iterator->EquationId();
if (dof_iterator->IsFixed())
b[i] = 0.0;
}
Timer::Stop("BuildRHS");
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& r_elements_array = rModelPart.Elements();
const int number_of_elements = static_cast<int>(r_elements_array.size());
DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
unsigned int nthreads = ParallelUtilities::GetNumThreads();
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
/**
* Here we declare three sets.
* - The global set: Contains all the DoF of the system
* - The slave set: The DoF that are not going to be solved, due to MPC formulation
*/
set_type dof_global_set;
dof_global_set.reserve(number_of_elements*20);
#pragma omp parallel firstprivate(dof_list, second_dof_list)
{
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// We cleate the temporal set and we reserve some space on them
set_type dofs_tmp_set;
dofs_tmp_set.reserve(20000);
// Gets the array of elements from the modeler
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_elements; ++i) {
auto it_elem = r_elements_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of conditions from the modeler
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const int number_of_conditions = static_cast<int>(r_conditions_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_conditions; ++i) {
auto it_cond = r_conditions_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of constraints from the modeler
auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
const int number_of_constraints = static_cast<int>(r_constraints_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_constraints; ++i) {
auto it_const = r_constraints_array.begin() + i;
// Gets list of Dof involved on every element
it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
}
// We merge all the sets in one thread
#pragma omp critical
{
dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dof_global_set.size());
for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
{
Doftemp.push_back( *it );
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
//Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
#ifdef KRATOS_DEBUG
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
if (BaseType::GetCalculateReactionsFlag()) {
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
//int free_id = 0;
BaseType::mEquationSystemSize = BaseType::mDofSet.size();
int ndofs = static_cast<int>(BaseType::mDofSet.size());
#pragma omp parallel for firstprivate(ndofs)
for (int i = 0; i < static_cast<int>(ndofs); i++) {
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i;
dof_iterator->SetEquationId(i);
}
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
TSparseSpace::SetToZero(Dx);
if (b.size() != BaseType::mEquationSystemSize) {
b.resize(BaseType::mEquationSystemSize, false);
}
TSparseSpace::SetToZero(b);
ConstructMasterSlaveConstraintsStructure(rModelPart);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
KRATOS_TRY
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Computing constraints
const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
for (int k = 0; k < n_constraints; ++k) {
auto it = constraints_begin + k;
it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids.
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Computing constraints
const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
for (int k = 0; k < n_constraints; ++k) {
auto it = constraints_begin + k;
it->FinalizeSolutionStep(r_process_info);
}
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
TSparseSpace::SetToZero(b);
//refresh RHS to have the correct reactions
BuildRHSNoDirichlet(pScheme, rModelPart, b);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++) {
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
const int i = (dof_iterator)->EquationId();
(dof_iterator)->GetSolutionStepReactionValue() = -b[i];
}
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
const std::size_t system_size = rA.size1();
Vector scaling_factors (system_size);
const auto it_dof_iterator_begin = BaseType::mDofSet.begin();
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
// NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++) {
auto it_dof_iterator = it_dof_iterator_begin + k;
if (it_dof_iterator->IsFixed()) {
scaling_factors[k] = 0.0;
} else {
scaling_factors[k] = 1.0;
}
}
double* Avalues = rA.value_data().begin();
std::size_t* Arow_indices = rA.index1_data().begin();
std::size_t* Acol_indices = rA.index2_data().begin();
// The diagonal considered
mScaleFactor = GetScaleNorm(rModelPart, rA);
// Detect if there is a line of all zeros and set the diagonal to a 1 if this happens
#pragma omp parallel firstprivate(system_size)
{
std::size_t col_begin = 0, col_end = 0;
bool empty = true;
#pragma omp for
for (int k = 0; k < static_cast<int>(system_size); ++k) {
col_begin = Arow_indices[k];
col_end = Arow_indices[k + 1];
empty = true;
for (std::size_t j = col_begin; j < col_end; ++j) {
if(Avalues[j] != 0.0) {
empty = false;
break;
}
}
if(empty) {
rA(k, k) = mScaleFactor;
rb[k] = 0.0;
}
}
}
#pragma omp parallel for firstprivate(system_size)
for (int k = 0; k < static_cast<int>(system_size); ++k) {
std::size_t col_begin = Arow_indices[k];
std::size_t col_end = Arow_indices[k+1];
const double k_factor = scaling_factors[k];
if (k_factor == 0.0) {
// Zero out the whole row, except the diagonal
for (std::size_t j = col_begin; j < col_end; ++j)
if (static_cast<int>(Acol_indices[j]) != k )
Avalues[j] = 0.0;
// Zero out the RHS
rb[k] = 0.0;
} else {
// Zero out the column which is associated with the zero'ed row
for (std::size_t j = col_begin; j < col_end; ++j)
if(scaling_factors[ Acol_indices[j] ] == 0 )
Avalues[j] = 0.0;
}
}
}
/**
* @brief Applies the constraints with master-slave relation matrix (RHS only)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rb The RHS vector
*/
void ApplyRHSConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& rb
) override
{
KRATOS_TRY
if (rModelPart.MasterSlaveConstraints().size() != 0) {
BuildMasterSlaveConstraints(rModelPart);
// We compute the transposed matrix of the global relation matrix
TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
TSystemVectorType b_modified(rb.size());
TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
TSparseSpace::Copy(b_modified, rb);
// Apply diagonal values on slaves
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
const IndexType slave_equation_id = mSlaveIds[i];
if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
rb[slave_equation_id] = 0.0;
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Applies the constraints with master-slave relation matrix
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rb The RHS vector
*/
void ApplyConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rb
) override
{
KRATOS_TRY
if (rModelPart.MasterSlaveConstraints().size() != 0) {
BuildMasterSlaveConstraints(rModelPart);
// We compute the transposed matrix of the global relation matrix
TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
TSystemVectorType b_modified(rb.size());
TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
TSparseSpace::Copy(b_modified, rb);
TSystemMatrixType auxiliar_A_matrix(mT.size2(), rA.size2());
SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = T_transpose * rA
T_transpose_matrix.resize(0, 0, false); //free memory
SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, mT, rA); //A = auxilar * T NOTE: here we are overwriting the old A matrix!
auxiliar_A_matrix.resize(0, 0, false); //free memory
const double max_diag = GetMaxDiagonal(rA);
// Apply diagonal values on slaves
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
const IndexType slave_equation_id = mSlaveIds[i];
if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
rA(slave_equation_id, slave_equation_id) = max_diag;
rb[slave_equation_id] = 0.0;
}
}
}
KRATOS_CATCH("")
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
BaseType::Clear();
mSlaveIds.clear();
mMasterIds.clear();
mInactiveSlaveDofs.clear();
mT.resize(0,0,false);
mConstantVector.resize(0,false);
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "block_builder_and_solver",
"block_builder" : true,
"diagonal_values_for_dirichlet_dofs" : "use_max_diagonal",
"silent_warnings" : false
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "block_builder_and_solver";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
TSystemMatrixType mT; /// This is matrix containing the global relation for the constraints
TSystemVectorType mConstantVector; /// This is vector containing the rigid movement of the constraint
std::vector<IndexType> mSlaveIds; /// The equation ids of the slaves
std::vector<IndexType> mMasterIds; /// The equation ids of the master
std::unordered_set<IndexType> mInactiveSlaveDofs; /// The set containing the inactive slave dofs
double mScaleFactor = 1.0; /// The manuallyset scale factor
SCALING_DIAGONAL mScalingDiagonal; /// We identify the scaling considered for the dirichlet dofs
Flags mOptions; /// Some flags used internally
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void BuildRHSNoDirichlet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b)
{
KRATOS_TRY
//Getting the Elements
ElementsArrayType& pElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = rModelPart.Conditions();
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
//for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
const int nelements = static_cast<int>(pElements.size());
#pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
{
#pragma omp for schedule(guided, 512) nowait
for (int i=0; i<nelements; i++) {
typename ElementsArrayType::iterator it = pElements.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if( (it)->IsDefined(ACTIVE) ) {
element_is_active = (it)->Is(ACTIVE);
}
if(element_is_active) {
//calculate elemental Right Hand Side Contribution
pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
LHS_Contribution.resize(0, 0, false);
RHS_Contribution.resize(0, false);
// assemble all conditions
const int nconditions = static_cast<int>(ConditionsArray.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i<nconditions; i++) {
auto it = ConditionsArray.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if( (it)->IsDefined(ACTIVE) ) {
condition_is_active = (it)->Is(ACTIVE);
}
if(condition_is_active) {
//calculate elemental contribution
pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
}
KRATOS_CATCH("")
}
virtual void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
if (rModelPart.MasterSlaveConstraints().size() > 0) {
Timer::Start("ConstraintsRelationMatrixStructure");
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Vector containing the localization in the system of the different terms
DofsVectorType slave_dof_list, master_dof_list;
// Constraint initial iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());
std::vector<LockObject> lock_array(indices.size());
#pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
{
Element::EquationIdVectorType slave_ids(3);
Element::EquationIdVectorType master_ids(3);
std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;
#pragma omp for schedule(guided, 512) nowait
for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
auto it_const = it_const_begin + i_const;
// Detect if the constraint is active or not. If the user did not make any choice the constraint
// It is active by default
bool constraint_is_active = true;
if( it_const->IsDefined(ACTIVE) ) {
constraint_is_active = it_const->Is(ACTIVE);
}
if(constraint_is_active) {
it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);
// Slave DoFs
for (auto &id_i : slave_ids) {
temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
}
}
}
// Merging all the temporal indexes
for (int i = 0; i < static_cast<int>(temp_indices.size()); ++i) {
lock_array[i].SetLock();
indices[i].insert(temp_indices[i].begin(), temp_indices[i].end());
lock_array[i].UnSetLock();
}
}
mSlaveIds.clear();
mMasterIds.clear();
for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
if (indices[i].size() == 0) // Master dof!
mMasterIds.push_back(i);
else // Slave dof
mSlaveIds.push_back(i);
indices[i].insert(i); // Ensure that the diagonal is there in T
}
// Count the row sizes
std::size_t nnz = 0;
for (IndexType i = 0; i < indices.size(); ++i)
nnz += indices[i].size();
mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
mConstantVector.resize(indices.size(), false);
double *Tvalues = mT.value_data().begin();
IndexType *Trow_indices = mT.index1_data().begin();
IndexType *Tcol_indices = mT.index2_data().begin();
// Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Trow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(mT.size1()); i++)
Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mT.size1()); ++i) {
const IndexType row_begin = Trow_indices[i];
const IndexType row_end = Trow_indices[i + 1];
IndexType k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
Tcol_indices[k] = *it;
Tvalues[k] = 0.0;
k++;
}
indices[i].clear(); //deallocating the memory
std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
}
mT.set_filled(indices.size() + 1, nnz);
Timer::Stop("ConstraintsRelationMatrixStructure");
}
}
virtual void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
KRATOS_TRY
TSparseSpace::SetToZero(mT);
TSparseSpace::SetToZero(mConstantVector);
// The current process info
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Vector containing the localization in the system of the different terms
DofsVectorType slave_dof_list, master_dof_list;
// Contributions to the system
Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
Vector constant_vector = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType slave_equation_ids, master_equation_ids;
const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
// We clear the set
mInactiveSlaveDofs.clear();
#pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
{
std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;
#pragma omp for schedule(guided, 512)
for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;
// Detect if the constraint is active or not. If the user did not make any choice the constraint
// It is active by default
bool constraint_is_active = true;
if (it_const->IsDefined(ACTIVE))
constraint_is_active = it_const->Is(ACTIVE);
if (constraint_is_active) {
it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
const IndexType i_global = slave_equation_ids[i];
// Assemble matrix row
AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);
// Assemble constant vector
const double constant_value = constant_vector[i];
double& r_value = mConstantVector[i_global];
#pragma omp atomic
r_value += constant_value;
}
} else { // Taking into account inactive constraints
it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
}
}
// We merge all the sets in one thread
#pragma omp critical
{
mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
}
}
// Setting the master dofs into the T and C system
for (auto eq_id : mMasterIds) {
mConstantVector[eq_id] = 0.0;
mT(eq_id, eq_id) = 1.0;
}
// Setting inactive slave dofs in the T and C system
for (auto eq_id : mInactiveSlaveDofs) {
mConstantVector[eq_id] = 0.0;
mT(eq_id, eq_id) = 1.0;
}
KRATOS_CATCH("")
}
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ModelPart& rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
std::vector< LockObject > lock_array(equation_size);
std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
indices[iii].reserve(40);
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nelements, ids)
for (int iii=0; iii<nelements; iii++) {
typename ElementsContainerType::iterator i_element = el_begin + iii;
pScheme->EquationId(*i_element, ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++) {
lock_array[ids[i]].SetLock();
auto& row_indices = indices[ids[i]];
row_indices.insert(ids.begin(), ids.end());
lock_array[ids[i]].UnSetLock();
}
}
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii<nconditions; iii++) {
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->EquationId(*i_condition, ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++) {
lock_array[ids[i]].SetLock();
auto& row_indices = indices[ids[i]];
row_indices.insert(ids.begin(), ids.end());
lock_array[ids[i]].UnSetLock();
}
}
if (rModelPart.MasterSlaveConstraints().size() != 0) {
Element::EquationIdVectorType master_ids(3, 0);
Element::EquationIdVectorType slave_ids(3, 0);
const int nmasterSlaveConstraints = rModelPart.MasterSlaveConstraints().size();
const auto const_begin = rModelPart.MasterSlaveConstraints().begin();
#pragma omp parallel for firstprivate(nmasterSlaveConstraints, slave_ids, master_ids)
for (int iii = 0; iii<nmasterSlaveConstraints; ++iii) {
auto i_const = const_begin + iii;
i_const->EquationIdVector(slave_ids, master_ids, CurrentProcessInfo);
for (std::size_t i = 0; i < slave_ids.size(); i++) {
lock_array[slave_ids[i]].SetLock();
auto& row_indices = indices[slave_ids[i]];
row_indices.insert(slave_ids[i]);
lock_array[slave_ids[i]].UnSetLock();
}
for (std::size_t i = 0; i < master_ids.size(); i++) {
lock_array[master_ids[i]].SetLock();
auto& row_indices = indices[master_ids[i]];
row_indices.insert(master_ids[i]);
lock_array[master_ids[i]].UnSetLock();
}
}
}
//destroy locks
lock_array = std::vector< LockObject >();
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++) {
nnz += indices[i].size();
}
A = CompressedMatrixType(indices.size(), indices.size(), nnz);
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++) {
Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++) {
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i+1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
indices[i].clear(); //deallocating the memory
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size()+1, nnz);
Timer::Stop("MatrixStructure");
}
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++) {
unsigned int i_global = EquationId[i_local];
double& r_a = b[i_global];
const double& v_a = RHS_Contribution(i_local);
#pragma omp atomic
r_a += v_a;
AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
}
}
//**************************************************************************
void AssembleLHS(
TSystemMatrixType& rA,
const LocalSystemMatrixType& rLHSContribution,
Element::EquationIdVectorType& rEquationId
)
{
const SizeType local_size = rLHSContribution.size1();
for (IndexType i_local = 0; i_local < local_size; i_local++) {
const IndexType i_global = rEquationId[i_local];
AssembleRowContribution(rA, rLHSContribution, i_global, i_local, rEquationId);
}
}
//**************************************************************************
void AssembleRHS(
TSystemVectorType& b,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
for (unsigned int i_local = 0; i_local < local_size; i_local++) {
unsigned int i_global = EquationId[i_local];
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
double* values_vector = A.value_data().begin();
std::size_t* index1_vector = A.index1_data().begin();
std::size_t* index2_vector = A.index2_data().begin();
size_t left_limit = index1_vector[i];
// size_t right_limit = index1_vector[i+1];
//find the first entry
size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
size_t last_found = EquationId[0];
double& r_a = values_vector[last_pos];
const double& v_a = Alocal(i_local,0);
#pragma omp atomic
r_a += v_a;
//now find all of the other entries
size_t pos = 0;
for (unsigned int j=1; j<EquationId.size(); j++) {
unsigned int id_to_find = EquationId[j];
if(id_to_find > last_found) {
pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
} else if(id_to_find < last_found) {
pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
} else {
pos = last_pos;
}
double& r = values_vector[pos];
const double& v = Alocal(i_local,j);
#pragma omp atomic
r += v;
last_found = id_to_find;
last_pos = pos;
}
}
/**
* @brief This method returns the scale norm considering for scaling the diagonal
* @param rModelPart The problem model part
* @param rA The LHS matrix
* @return The scale norm
*/
double GetScaleNorm(
ModelPart& rModelPart,
TSystemMatrixType& rA
)
{
switch (mScalingDiagonal) {
case SCALING_DIAGONAL::NO_SCALING:
return 1.0;
case SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL: {
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
KRATOS_ERROR_IF_NOT(r_current_process_info.Has(BUILD_SCALE_FACTOR)) << "Scale factor not defined at process info" << std::endl;
return r_current_process_info.GetValue(BUILD_SCALE_FACTOR);
}
case SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL:
return GetDiagonalNorm(rA)/static_cast<double>(rA.size1());
case SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL:
return GetMaxDiagonal(rA);
// return TSparseSpace::TwoNorm(rA)/static_cast<double>(rA.size1());
default:
return GetMaxDiagonal(rA);
}
}
/**
* @brief This method returns the diagonal norm considering for scaling the diagonal
* @param rA The LHS matrix
* @return The diagonal norm
*/
double GetDiagonalNorm(TSystemMatrixType& rA)
{
double diagonal_norm = 0.0;
#pragma omp parallel for reduction(+:diagonal_norm)
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
diagonal_norm += std::pow(rA(i,i), 2);
}
return std::sqrt(diagonal_norm);
}
/**
* @brief This method returns the diagonal max value
* @param rA The LHS matrix
* @return The diagonal max value
*/
double GetAveragevalueDiagonal(TSystemMatrixType& rA)
{
return 0.5 * (GetMaxDiagonal(rA) + GetMinDiagonal(rA));
}
/**
* @brief This method returns the diagonal max value
* @param rA The LHS matrix
* @return The diagonal max value
*/
double GetMaxDiagonal(TSystemMatrixType& rA)
{
// // NOTE: Reduction failing in MSVC
// double max_diag = 0.0;
// #pragma omp parallel for reduction(max:max_diag)
// for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
// max_diag = std::max(max_diag, std::abs(rA(i,i)));
// }
// return max_diag;
// Creating a buffer for parallel vector fill
const int num_threads = ParallelUtilities::GetNumThreads();
Vector max_vector(num_threads, 0.0);
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
const int id = OpenMPUtils::ThisThread();
const double abs_value_ii = std::abs(rA(i,i));
if (abs_value_ii > max_vector[id])
max_vector[id] = abs_value_ii;
}
double max_diag = 0.0;
for(int i = 0; i < num_threads; ++i) {
max_diag = std::max(max_diag, max_vector[i]);
}
return max_diag;
}
/**
* @brief This method returns the diagonal min value
* @param rA The LHS matrix
* @return The diagonal min value
*/
double GetMinDiagonal(TSystemMatrixType& rA)
{
// // NOTE: Reduction failing in MSVC
// double min_diag = std::numeric_limits<double>::max();
// #pragma omp parallel for reduction(min:min_diag)
// for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
// min_diag = std::min(min_diag, std::abs(rA(i,i)));
// }
// return min_diag;
// Creating a buffer for parallel vector fill
const int num_threads = ParallelUtilities::GetNumThreads();
Vector min_vector(num_threads, std::numeric_limits<double>::max());
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
const int id = OpenMPUtils::ThisThread();
const double abs_value_ii = std::abs(rA(i,i));
if (abs_value_ii < min_vector[id])
min_vector[id] = abs_value_ii;
}
double min_diag = std::numeric_limits<double>::max();
for(int i = 0; i < num_threads; ++i) {
min_diag = std::min(min_diag, min_vector[i]);
}
return min_diag;
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// Setting flags<
const std::string& r_diagonal_values_for_dirichlet_dofs = ThisParameters["diagonal_values_for_dirichlet_dofs"].GetString();
std::set<std::string> available_options_for_diagonal = {"no_scaling","use_max_diagonal","use_diagonal_norm","defined_in_process_info"};
if (available_options_for_diagonal.find(r_diagonal_values_for_dirichlet_dofs) == available_options_for_diagonal.end()) {
std::stringstream msg;
msg << "Currently prescribed diagonal values for dirichlet dofs : " << r_diagonal_values_for_dirichlet_dofs << "\n";
msg << "Admissible values for the diagonal scaling are : no_scaling, use_max_diagonal, use_diagonal_norm, or defined_in_process_info" << "\n";
KRATOS_ERROR << msg.str() << std::endl;
}
// The first option will not consider any scaling (the diagonal values will be replaced with 1)
if (r_diagonal_values_for_dirichlet_dofs == "no_scaling") {
mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
} else if (r_diagonal_values_for_dirichlet_dofs == "use_max_diagonal") {
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL;
} else if (r_diagonal_values_for_dirichlet_dofs == "use_diagonal_norm") { // On this case the norm of the diagonal will be considered
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL;
} else { // Otherwise we will assume we impose a numerical value
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL;
}
mOptions.Set(SILENT_WARNINGS, ThisParameters["silent_warnings"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate) {
i++;
}
if (i == endit) {
v.push_back(candidate);
}
}
//******************************************************************************************
//******************************************************************************************
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++) {
partitions[i] = partitions[i - 1] + partition_size;
}
}
inline unsigned int ForwardFind(const unsigned int id_to_find,
const unsigned int start,
const size_t* index_vector)
{
unsigned int pos = start;
while(id_to_find != index_vector[pos]) pos++;
return pos;
}
inline unsigned int BackwardFind(const unsigned int id_to_find,
const unsigned int start,
const size_t* index_vector)
{
unsigned int pos = start;
while(id_to_find != index_vector[pos]) pos--;
return pos;
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpace, class TDenseSpace, class TLinearSolver>
const Kratos::Flags ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::SILENT_WARNINGS(Kratos::Flags::Create(0));
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
target_exit_data_map_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - -x c++ %s
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp-simd -ferror-limit 100 -o - %s
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp-simd -ferror-limit 100 -o - -x c++ %s
int main(int argc, char **argv) {
int r;
#pragma omp target exit data // expected-error {{expected at least one 'map' clause for '#pragma omp target exit data'}}
#pragma omp target exit data map(r) // expected-error {{map type must be specified for '#pragma omp target exit data'}}
#pragma omp target exit data map(tofrom: r) // expected-error {{map type 'tofrom' is not allowed for '#pragma omp target exit data'}}
#pragma omp target exit data map(always, from: r)
#pragma omp target exit data map(delete: r)
#pragma omp target exit data map(release: r)
#pragma omp target exit data map(always, alloc: r) // expected-error {{map type 'alloc' is not allowed for '#pragma omp target exit data'}}
#pragma omp target exit data map(to: r) // expected-error {{map type 'to' is not allowed for '#pragma omp target exit data'}}
return 0;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define M (16*32)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];
int main(void) {
check_offloading();
INIT();
int tms = 16;
int th = 32;
int threads[1]; threads[0] = th-1;
//
// Test: proc_bind clause
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: private, shared clauses on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p = 2; \
double q = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[idx][i] += p; \
B[idx][i] += q; \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
//
// Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p = -4; \
double q = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i] + p; \
B[idx][i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: lastprivate clause on omp target teams distribute parallel for with nested parallel.
//
TESTD("omp target teams distribute parallel for dist_schedule(static,2) num_teams(tms) num_threads(th)",
for (int idx = 0; idx < tms*th; idx++) {
double q0[1];
double q1[1];
double q2[1];
double q3[1];
S[idx] = 0;
for (int i = 0; i < N; i++) {
A[idx][i] = B[idx][i] = 0;
}
_Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q0[0] = C[i] + D[i];
A[idx][i] += q0[0];
}
_Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q1[0] = C[i] + D[i];
A[idx][i] += q1[0];
}
_Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q2[0] = D[i] + E[i];
B[idx][i] += q2[0];
}
_Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q3[0] = D[i] + E[i];
B[idx][i] += q3[0];
}
double tmp = q0[0] + q1[0] + q2[0] + q3[0];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
}
, VERIFY(0, tms*th, S[i], (double) 2 * (N + (N/2*(N+1))) ));
//
// Test: private clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p[2]; \
p[0] = 2; p[1] = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
p[1] = D[i] + E[i]; \
A[idx][i] += p[0]; \
B[idx][i] += p[1]; \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
//
// Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p[2]; \
p[0] = -4; p[1] = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i] + p[0]; \
B[idx][i] += D[i] + E[i] + p[1]; \
if (i == N-1) { \
p[0] += 6; \
p[1] += 9; \
} \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: collapse clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[idx][i*3+j] += C[i*3+j] + D[i*3+j]; \
B[idx][i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: ordered clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES ordered
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
,
for (int i = 0; i < N; i++) { \
_Pragma("omp ordered") \
S[idx] += C[i] + D[i]; \
}
,
{
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
return 0;
}
|
Chromosome.h | /*
* Chrom.h
*
* Created on: 14 de mar de 2016
* Author: luan
*/
#ifndef CHROM_H_
#define CHROM_H_
#include <iostream>
#include <vector>
#include <list>
#include <limits>
#include <algorithm>
#include <random>
#include "Data.h"
#include <omp.h>
using namespace std;
struct vm_time {
vector<pair<double, double>> life;
double last;
double static_vm = false;
double storage = 0.0;
int count_file = 0;
};
std::random_device rd_chr;
std::mt19937 engine_chr(rd_chr());
class Chromosome {
public:
vector<int> allocation, height_soft;
vector<double> time_vector, start_time_vector;
vector<int> ordering;
double fitness, lambda, transfer_size;
Data* data;
unordered_map<int, vector<string>> scheduler;
unordered_map<int, vector<int>> vm_queue;
Chromosome(Data* data, double lambda) :
allocation(data->size, -1), height_soft(data->task_size, -1),
time_vector(data->task_size, -1), start_time_vector(data->task_size, -1),
ordering(0), fitness(0), lambda(lambda), transfer_size(0), data(data) {
computeHeightSoft(data->id_root);
encode();
computeFitness();
}
Chromosome(const Chromosome* other) :
allocation(other->allocation), height_soft(other->height_soft),
time_vector(other->time_vector), start_time_vector(other->start_time_vector),
ordering(other->ordering), fitness(other->fitness),
lambda(other->lambda), transfer_size(other->transfer_size), data(other->data),
scheduler(other->scheduler), vm_queue(other->vm_queue) {}
Chromosome() {}
//virtual ~Chromosome();
virtual ~Chromosome() {
// TODO Auto-generated destructor stub
}
// Compute the fitness of chromosome
void computeFitness(bool check_storage = true, bool check_sequence = false) {
fill(time_vector.begin(), time_vector.end(), -1);
fill(start_time_vector.begin(), start_time_vector.end(), -1);
vector<double> queue(data->vm_size, 0);
if (check_storage && !checkFiles()) {
std::cerr << "check file error" << endl;
throw;
}
scheduler.clear();
vm_queue.clear();
// compute makespan
for (auto id_task : ordering) {//for each task, do
if (id_task != data->id_root && id_task != data->id_sink) {//if is not root or sink than
if (check_sequence && !checkTaskSeq(id_task)) {
std::cerr << "Encode error - Chrom: Error in the precedence relations." << endl;
throw;
}
// Load Vm
auto vm = data->vm_map.find(allocation[id_task])->second;
auto task = data->task_map.find(id_task)->second;
// update vm queue
auto f_queue = vm_queue.insert(make_pair(vm.id, vector<int>()));
f_queue.first->second.push_back(task.id);
// update scheduler
auto f_scheduler = scheduler.insert(make_pair(vm.id, vector<string>()));
f_scheduler.first->second.push_back(task.tag);
// Compute Task Times
auto start_time = ST(&task, &vm, queue);
auto read_time = RT(&task, &vm);
auto run_time = ceil(task.base_time * vm.slowdown);//seconds
auto write_time = WT(&task, &vm);
auto finish_time = start_time + read_time + run_time + write_time;
//update structures
time_vector[id_task] = finish_time;
start_time_vector[id_task] = start_time;
queue[vm.id] = finish_time;
} else {// root and sink tasks
if (id_task == data->id_root)
time_vector[id_task] = 0;
else {//sink task
double max_value = 0.0;
for (auto tk : data->prec.find(id_task)->second)
max_value = std::max(max_value, time_vector[tk]);
time_vector[id_task] = max_value;
}
}
}
fitness = time_vector[data->id_sink];
}
/*crossover*/
Chromosome crossover(Chromosome* partner) {
Chromosome chr(partner);
uniform_int_distribution<> dis_ordering(0, data->task_size - 1);
uniform_int_distribution<> dis_allocation(0, data->size - 1);
int point_ordering = dis_ordering(engine_chr);//crossover point to ordering list
int point_allocation = dis_allocation(engine_chr);//crossover point to allocation list
//allocation crossover (Single point crossover)
for (int i = 0; i < point_allocation; i++) {
chr.allocation[i] = allocation[i];
}
//ordering crossover
vector<bool> aux(data->task_size, false);
chr.ordering.clear();
//ordering crossover first part self -> chr
for (auto i = 0; i < point_ordering; i++) {
chr.ordering.push_back(ordering[i]);
aux[ordering[i]] = true;
}
//Ordering crossover second part partner -> chr
for (auto i = 0; i < data->task_size; i++) {
if (!aux[partner->ordering[i]])
chr.ordering.push_back(partner->ordering[i]);
}
return chr;
}
//Mutation on allocation chromosome
void mutate(double prob) {
uniform_int_distribution<> idis(0, data->vm_size - 1);
for (int i = 0; i < data->size; i++) {
if (((float) random() / (float) RAND_MAX) <= prob) {
allocation[i] = idis(engine_chr);
}
}
}
void print() {
cout << "#!# " << fitness << endl;
cout << "Tasks: " << endl;
for (auto info : data->vm_map) {
auto vm = info.second;
cout << vm.id << ": ";
auto f = scheduler.find(vm.id);
if (f != scheduler.end()) {
for (auto task_tag: f->second)
cout << task_tag << " ";
}
cout << endl;
}
cout << endl;
/*for(auto info : data->vm_map){
auto vm = info.second;
cout << "[" << vm.id << "]" << " <" << vm.name << "> : ";
auto f = scheduler.find(vm.id);
if(f != scheduler.end()) {
for (auto task_tag : f->second)
cout << task_tag << " ";
}
cout << endl;
}*/
cout << "Files: " << endl;
for (auto info: data->vm_map) {
auto vm = info.second;
cout << vm.id << ": ";
for (auto info : data->file_map) {
auto file = info.second;
int vm_id = file.is_static ? file.static_vm : allocation[file.id];
if (vm_id == vm.id)
cout << file.name << " ";
}
cout << endl;
}
/*for(auto info : data->file_map){
auto file = info.second;
int vm_id = file.is_static ? file.static_vm : allocation[file.id];
cout << "[" << vm_id << ", " << file.name << "]" << " ";
}*/
//cout << endl;
/*cout << "Task Sequence: " << endl;
for(auto task_id : ordering)
if(task_id != data->id_root && task_id && data->id_sink)
cout << data->task_map.find(task_id)->second.name << ", ";
cout << endl;*/
}
// Compute distance between two solutions
int getDistance(const Chromosome &chr) {
int distance = 0;
#pragma omp parallel sections reduction(+:distance)
{
#pragma omp section
{
// compute the distance based on position
for (int i = 0; i < data->size; i++) {
if (chr.allocation[i] != allocation[i]) {
distance += 1;
}
}
}
#pragma omp section
{
// compute the distance based on swaps required
vector<int> aux_ordering(ordering);
for (int i = 0; i < data->task_size; i++) {
if (chr.ordering[i] != aux_ordering[i]) {
distance += 1;
for (int j = i + 1; j < data->task_size; j++) {
if (chr.ordering[i] == aux_ordering[j]) {
iter_swap(aux_ordering.begin() + i, aux_ordering.begin() + j);
}
}
}
}
}
}
return distance;
}
private:
int computeHeightSoft(int node) {
int min = numeric_limits<int>::max();
if (height_soft[node] != -1)
return height_soft[node];
if (node != data->id_sink) {
for (auto i : data->succ.find(node)->second) {
int value = computeHeightSoft(i);
min = std::min(value, min);
}
} else {
height_soft[node] = data->height[node];
return height_soft[node];
}
uniform_int_distribution<> dis(data->height[node], min - 1);
height_soft[node] = dis(engine_chr);
return height_soft[node];
}
// Random encode (new chromosome)
void encode() {
vector<int> seq_list(boost::counting_iterator<int>(0u), boost::counting_iterator<int>(height_soft.size()));
sort(begin(seq_list), end(seq_list), [&](const int &i, const int &j) {
return height_soft[i] < height_soft[j];
});//sort a list based on height soft
for (auto task : seq_list)//encode ordering Chromosome
ordering.push_back(task);
//Encode allocation chromosome
uniform_int_distribution<> dis(0, data->vm_size - 1);
for (int i = 0; i < data->size; i++)
allocation[i] = dis(engine_chr);
}
/* Checks the sequence of tasks is valid */
inline bool checkTaskSeq(int task) {
for (auto tk : data->prec.find(task)->second)
if (time_vector[tk] == -1)
return false;
return true;
}
// Check and organize the file based on the storage capacity
inline bool checkFiles() {
bool flag = true;
int count = 0;
vector<double> aux_storage(data->storage_vet);
vector<int> aux(data->vm_size);
iota(aux.begin(), aux.end(), 0); // 0,1,2,3,4 ... n
unordered_map<int, vector<int> > map_file;
int id_vm;
// build file map and compute the storage
for (auto it : data->file_map) {
if (!it.second.is_static) {
id_vm = allocation[it.second.id];
auto f = map_file.insert(make_pair(id_vm, vector<int>()));
f.first->second.push_back(it.second.id);
auto file = data->file_map.find(it.second.id)->second;
aux_storage[id_vm] -= file.size;
}
}
do {
//sort machines based on the storage capacity
sort(aux.begin(), aux.end(), [&](const int &a, const int &b) {
return aux_storage[a] < aux_storage[b];
});
if (aux_storage[aux[0]] < 0) {//if storage is full, start the file heuristic
cout << "Starting file heuristic ..." << endl;
int old_vm = aux[0]; //critical machine
int new_vm = aux[aux.size() - 1];//best machine
auto vet_file = map_file.find(old_vm)->second;
double min = numeric_limits<double>::max();
int min_file = -1;
//search min file (based on the size of file)
for_each(vet_file.begin(), vet_file.end(), [&](int i) {
cout << i << endl;
auto file = data->file_map.find(i)->second;
cout << file.name << endl;
if (file.size < min) {
min = file.size;
min_file = file.id;
}
});
auto file_min = data->file_map.find(min_file)->second;
cout << file_min.name << endl;
//minFile will be move to machine with more empty space
allocation[file_min.id] = new_vm;
//update aux Storage
aux_storage[old_vm] += file_min.size;
aux_storage[new_vm] -= file_min.size;
//Update mapFile structure
map_file[old_vm].erase(remove(map_file[old_vm].begin(), map_file[old_vm].end(), min_file),
map_file[old_vm].end());
map_file[new_vm].push_back(min_file);
} else flag = false;
count++;
} while (flag && count < data->file_size);
return !flag;
}
// Start Time
inline double ST(Task* task, VMachine* vm, const vector<double> &queue) {
// compute wait time
double max_pred_time = 0.0;
for (auto tk : data->prec.find(task->id)->second)
max_pred_time = std::max(max_pred_time, time_vector[tk]);
return std::max(max_pred_time, queue[vm->id]);
}
// Read time
inline double RT(Task* task, VMachine* vm) {
//compute read time
double read_time = 0;
int id_vm_file;
for (auto id_file : task->input) {
auto file = data->file_map.find(id_file)->second;
if (!file.is_static) {
id_vm_file = allocation[file.id];
// update vm queue
auto f_queue = vm_queue.insert(make_pair(id_vm_file, vector<int>()));
f_queue.first->second.push_back(file.id);
} else
id_vm_file = file.static_vm;
auto vm_file = data->vm_map.find(id_vm_file)->second;
read_time += ceil(TT(&file, vm, &vm_file) + (file.size * lambda));
}
return read_time;
} //Write time
// Write time
inline double WT(Task* task, VMachine* vm) {
//compute the write time
double write_time = 0;
for (auto id_file :task->output) {
auto file = data->file_map.find(id_file)->second;
auto vm_file = data->vm_map.find(allocation[file.id])->second;
// update vm queue
auto f_queue = vm_queue.insert(make_pair(vm_file.id, vector<int>()));
f_queue.first->second.push_back(id_file);
write_time += ceil(TT(&file, vm, &vm_file) + (file.size * (lambda * 2)));
}
return write_time;
}
// Transfer Time
inline double TT(File* file, VMachine* vm1, VMachine* vm2) {
if (vm1->id != vm2->id) {// if vm1 != vm2
//get the smallest link
auto link = std::min(vm1->bandwidth, vm2->bandwidth);
return file->size / link;
}
return 0;//otherwise
}
};
#endif /* CHROM_H_ */
|
KDTree.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_KDTREE_H_
#define _SPTAG_COMMON_KDTREE_H_
#include <iostream>
#include <vector>
#include <string>
#include <shared_mutex>
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"
#pragma warning(disable:4996) // 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.
namespace SPTAG
{
namespace COMMON
{
// node type for storing KDT
struct KDTNode
{
SizeType left;
SizeType right;
DimensionType split_dim;
float split_value;
};
class KDTree
{
public:
KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {}
KDTree(const KDTree& other) : m_iTreeNumber(other.m_iTreeNumber),
m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit),
m_iSamples(other.m_iSamples), m_lock(new std::shared_timed_mutex) {}
~KDTree() {}
inline const KDTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; }
inline KDTNode& operator[](SizeType index) { return m_pTreeRoots[index]; }
inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); }
inline SizeType sizePerTree() const {
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back();
}
template <typename T>
void Rebuild(VectorIndex* p_index)
{
COMMON::KDTree newTrees(*this);
newTrees.BuildTrees<T>(p_index, nullptr, 1);
std::unique_lock<std::shared_timed_mutex> lock(*m_lock);
m_pTreeRoots.swap(newTrees.m_pTreeRoots);
m_pTreeStart.swap(newTrees.m_pTreeStart);
}
template <typename T>
void BuildTrees(VectorIndex* p_index, std::vector<SizeType>* indices = nullptr, int numOfThreads = omp_get_num_threads())
{
std::vector<SizeType> localindices;
if (indices == nullptr) {
localindices.resize(p_index->GetNumSamples());
for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i;
}
else {
localindices.assign(indices->begin(), indices->end());
}
m_pTreeRoots.resize(m_iTreeNumber * localindices.size());
m_pTreeStart.resize(m_iTreeNumber, 0);
#pragma omp parallel for num_threads(numOfThreads)
for (int i = 0; i < m_iTreeNumber; i++)
{
Sleep(i * 100); std::srand(clock());
std::vector<SizeType> pindices(localindices.begin(), localindices.end());
std::random_shuffle(pindices.begin(), pindices.end());
m_pTreeStart[i] = i * (SizeType)pindices.size();
std::cout << "Start to build KDTree " << i + 1 << std::endl;
SizeType iTreeSize = m_pTreeStart[i];
DivideTree<T>(p_index, pindices, 0, (SizeType)pindices.size() - 1, m_pTreeStart[i], iTreeSize);
std::cout << i + 1 << " KDTree built, " << iTreeSize - m_pTreeStart[i] << " " << pindices.size() << std::endl;
}
}
inline std::uint64_t BufferSize() const
{
return sizeof(int) + sizeof(SizeType) * m_iTreeNumber +
sizeof(SizeType) + sizeof(KDTNode) * m_pTreeRoots.size();
}
bool SaveTrees(std::ostream& p_outstream) const
{
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
p_outstream.write((char*)&m_iTreeNumber, sizeof(int));
p_outstream.write((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber);
SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
p_outstream.write((char*)&treeNodeSize, sizeof(SizeType));
p_outstream.write((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize);
std::cout << "Save KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
return true;
}
bool SaveTrees(std::string sTreeFileName) const
{
std::cout << "Save KDT to " << sTreeFileName << std::endl;
std::ofstream output(sTreeFileName, std::ios::binary);
if (!output.is_open()) return false;
SaveTrees(output);
output.close();
return true;
}
bool LoadTrees(char* pKDTMemFile)
{
m_iTreeNumber = *((int*)pKDTMemFile);
pKDTMemFile += sizeof(int);
m_pTreeStart.resize(m_iTreeNumber);
memcpy(m_pTreeStart.data(), pKDTMemFile, sizeof(SizeType) * m_iTreeNumber);
pKDTMemFile += sizeof(SizeType)*m_iTreeNumber;
SizeType treeNodeSize = *((SizeType*)pKDTMemFile);
pKDTMemFile += sizeof(SizeType);
m_pTreeRoots.resize(treeNodeSize);
memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize);
std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
return true;
}
bool LoadTrees(std::string sTreeFileName)
{
std::cout << "Load KDT From " << sTreeFileName << std::endl;
std::ifstream input(sTreeFileName, std::ios::binary);
if (!input.is_open()) return false;
input.read((char*)&m_iTreeNumber, sizeof(int));
m_pTreeStart.resize(m_iTreeNumber);
input.read((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber);
SizeType treeNodeSize;
input.read((char*)&treeNodeSize, sizeof(SizeType));
m_pTreeRoots.resize(treeNodeSize);
input.read((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize);
input.close();
std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
return true;
}
template <typename T>
void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
{
for (int i = 0; i < m_iTreeNumber; i++) {
KDTSearch(p_index, p_query, p_space, m_pTreeStart[i], 0);
}
}
template <typename T>
void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const
{
while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits)
{
auto& tcell = p_space.m_SPTQueue.pop();
KDTSearch(p_index, p_query, p_space, tcell.node, tcell.distance);
}
}
private:
template <typename T>
void KDTSearch(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query,
COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const {
if (node < 0)
{
SizeType index = -node - 1;
if (index >= p_index->GetNumSamples()) return;
#ifdef PREFETCH
const char* data = (const char *)(p_index->GetSample(index));
_mm_prefetch(data, _MM_HINT_T0);
_mm_prefetch(data + 64, _MM_HINT_T0);
#endif
if (p_space.CheckAndSet(index)) return;
++p_space.m_iNumberOfTreeCheckedLeaves;
++p_space.m_iNumberOfCheckedLeaves;
p_space.m_NGQueue.insert(COMMON::HeapCell(index, p_index->ComputeDistance((const void*)p_query.GetTarget(), (const void*)data)));
return;
}
auto& tnode = m_pTreeRoots[node];
float diff = (p_query.GetTarget())[tnode.split_dim] - tnode.split_value;
float distanceBound = distBound + diff * diff;
SizeType otherChild, bestChild;
if (diff < 0)
{
bestChild = tnode.left;
otherChild = tnode.right;
}
else
{
otherChild = tnode.left;
bestChild = tnode.right;
}
p_space.m_SPTQueue.insert(COMMON::HeapCell(otherChild, distanceBound));
KDTSearch(p_index, p_query, p_space, bestChild, distBound);
}
template <typename T>
void DivideTree(VectorIndex* p_index, std::vector<SizeType>& indices, SizeType first, SizeType last,
SizeType index, SizeType &iTreeSize) {
ChooseDivision<T>(p_index, m_pTreeRoots[index], indices, first, last);
SizeType i = Subdivide<T>(p_index, m_pTreeRoots[index], indices, first, last);
if (i - 1 <= first)
{
m_pTreeRoots[index].left = -indices[first] - 1;
}
else
{
iTreeSize++;
m_pTreeRoots[index].left = iTreeSize;
DivideTree<T>(p_index, indices, first, i - 1, iTreeSize, iTreeSize);
}
if (last == i)
{
m_pTreeRoots[index].right = -indices[last] - 1;
}
else
{
iTreeSize++;
m_pTreeRoots[index].right = iTreeSize;
DivideTree<T>(p_index, indices, i, last, iTreeSize, iTreeSize);
}
}
template <typename T>
void ChooseDivision(VectorIndex* p_index, KDTNode& node, const std::vector<SizeType>& indices, const SizeType first, const SizeType last)
{
std::vector<float> meanValues(p_index->GetFeatureDim(), 0);
std::vector<float> varianceValues(p_index->GetFeatureDim(), 0);
SizeType end = min(first + m_iSamples, last);
SizeType count = end - first + 1;
// calculate the mean of each dimension
for (SizeType j = first; j <= end; j++)
{
const T* v = (const T*)p_index->GetSample(indices[j]);
for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++)
{
meanValues[k] += v[k];
}
}
for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++)
{
meanValues[k] /= count;
}
// calculate the variance of each dimension
for (SizeType j = first; j <= end; j++)
{
const T* v = (const T*)p_index->GetSample(indices[j]);
for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++)
{
float dist = v[k] - meanValues[k];
varianceValues[k] += dist*dist;
}
}
// choose the split dimension as one of the dimension inside TOP_DIM maximum variance
node.split_dim = SelectDivisionDimension(varianceValues);
// determine the threshold
node.split_value = meanValues[node.split_dim];
}
DimensionType SelectDivisionDimension(const std::vector<float>& varianceValues) const
{
// Record the top maximum variances
std::vector<DimensionType> topind(m_numTopDimensionKDTSplit);
int num = 0;
// order the variances
for (DimensionType i = 0; i < (DimensionType)varianceValues.size(); i++)
{
if (num < m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]])
{
if (num < m_numTopDimensionKDTSplit)
{
topind[num++] = i;
}
else
{
topind[num - 1] = i;
}
int j = num - 1;
// order the TOP_DIM variances
while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]])
{
std::swap(topind[j], topind[j - 1]);
j--;
}
}
}
// randomly choose a dimension from TOP_DIM
return topind[COMMON::Utils::rand(num)];
}
template <typename T>
SizeType Subdivide(VectorIndex* p_index, const KDTNode& node, std::vector<SizeType>& indices, const SizeType first, const SizeType last) const
{
SizeType i = first;
SizeType j = last;
// decide which child one point belongs
while (i <= j)
{
SizeType ind = indices[i];
const T* v = (const T*)p_index->GetSample(ind);
float val = v[node.split_dim];
if (val < node.split_value)
{
i++;
}
else
{
std::swap(indices[i], indices[j]);
j--;
}
}
// if all the points in the node are equal,equally split the node into 2
if ((i == first) || (i == last + 1))
{
i = (first + last + 1) / 2;
}
return i;
}
private:
std::vector<SizeType> m_pTreeStart;
std::vector<KDTNode> m_pTreeRoots;
public:
std::unique_ptr<std::shared_timed_mutex> m_lock;
int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples;
};
}
}
#endif
|
matrixop1.h | #ifndef MATRIXOP1_H
#define MATRIXOP1_H
/* * Declarations of matrix functions in Dymola
*
* Copyright (C) 1997-2001 Dynasim AB.
* All rights reserved.
*
* Author: Hans Olsson Dynasim AB, 1999
* Version: 1.4, 1999-09-24*/
/* */
#include <math.h>
/* To make functions and variables static to allow several S-function blocks
for real-time targets */
#if !defined(DYMOLA_STATIC)
#if defined(__cplusplus)
#define DYMOLA_STATIC extern
#elif defined(RT) && !defined(DYM2DS)
#define DYMOLA_STATIC static
#else
#define DYMOLA_STATIC
#endif
#endif
#if !defined(DYMOLA_STATIC2)
#if defined(__cplusplus)
#define DYMOLA_STATIC2 extern
#elif defined(RT) && !defined(DYM2DS)
#define DYMOLA_STATIC2 static
#else
#define DYMOLA_STATIC2 extern
#endif
#endif
#if defined(Matlab5) || defined(Matlab51) || defined(SimStruct) || defined(DYM2DS) || defined(FMU_SOURCE_CODE_EXPORT)
#if !defined(DynSimStruct)
#define DynSimStruct 1
/* Needed for dsutil.h in Modelica mode. */
#endif
#endif
#define Modelica
#include <stddef.h>
#include <stdarg.h>
#include <math.h>
#include "dsblock.h"
#include "f2c.h"
#ifndef DYN_MarkObject
#define DYN_MarkObject
typedef double Real;
#if defined(DYM2CCUR)
typedef int Integer;
#else
typedef int Integer;
#endif
typedef const char* String;
typedef Integer SizeType;
struct BufferObject {Real *Realbuffer;Integer *Integerbuffer;SizeType* Sizebuffer;String* Stringbuffer;};
struct MarkObject {struct BufferObject place,mark;};
#endif
#if defined(RT)
DYMOLA_STATIC2 Real Realbuffer[
#ifndef DYNREALBUFFER
500000
#else
DYNREALBUFFER
#endif
];
DYMOLA_STATIC2 Integer Integerbuffer[
#ifndef DYNINTEGERBUFFER
50000
#else
DYNINTEGERBUFFER
#endif
];
DYMOLA_STATIC2 SizeType Sizebuffer[
#ifndef DYNINTEGERBUFFER
50000
#else
DYNINTEGERBUFFER
#endif
];
DYMOLA_STATIC2 String Stringbuffer[
#ifndef DYNSTRINGBUFFER
10000
#else
DYNSTRINGBUFFER
#endif
];
DYMOLA_STATIC2 char simplestring[
#ifndef DYNSTRINGBUFFER
100000
#else
DYNSTRINGBUFFER
#endif
];
DYMOLA_STATIC2 Real RealbufferNon[
#ifndef DYNREALBUFFER
70000
#else
DYNREALBUFFER/10
#endif
];
DYMOLA_STATIC2 Integer IntegerbufferNon[
#ifndef DYNINTEGERBUFFER
50000
#else
DYNINTEGERBUFFER
#endif
];
DYMOLA_STATIC2 SizeType SizebufferNon[
#ifndef DYNINTEGERBUFFER
50000
#else
DYNINTEGERBUFFER
#endif
];
DYMOLA_STATIC2 String StringbufferNon[
#ifndef DYNSTRINGBUFFER
10000
#else
DYNSTRINGBUFFER
#endif
];
DYMOLA_STATIC2 char simplestringNon[
#ifndef DYNSTRINGBUFFER
10000
#else
DYNSTRINGBUFFER
#endif
];
DYMOLA_STATIC Real* EndRealbufferNon=RealbufferNon+sizeof(RealbufferNon)/sizeof(*RealbufferNon);
DYMOLA_STATIC Integer* EndIntegerbufferNon=IntegerbufferNon+sizeof(IntegerbufferNon)/sizeof(*IntegerbufferNon);
DYMOLA_STATIC SizeType* EndSizebufferNon=SizebufferNon+sizeof(SizebufferNon)/sizeof(*SizebufferNon);
DYMOLA_STATIC String* EndStringbufferNon=StringbufferNon+sizeof(StringbufferNon)/sizeof(*StringbufferNon);
DYMOLA_STATIC char* EndsimplestringNon=simplestringNon+sizeof(simplestringNon)/sizeof(*simplestringNon);
#if (defined(_OPENMP) && !defined(DISABLE_DYMOLA_OPENMP))
DYMOLA_STATIC Real* EndRealbuffer=0;
#pragma omp threadprivate(EndRealbuffer)
DYMOLA_STATIC Integer* EndIntegerbuffer=0;
#pragma omp threadprivate(EndIntegerbuffer)
DYMOLA_STATIC SizeType* EndSizebuffer=0;
#pragma omp threadprivate(EndSizebuffer)
DYMOLA_STATIC String* EndStringbuffer=0;
#pragma omp threadprivate(EndStringbuffer)
DYMOLA_STATIC char* Endsimplestring=0;
#pragma omp threadprivate(Endsimplestring)
DYMOLA_STATIC Real* EndRealbuffer2=Realbuffer+sizeof(Realbuffer)/sizeof(*Realbuffer);
DYMOLA_STATIC Integer* EndIntegerbuffer2=Integerbuffer+sizeof(Integerbuffer)/sizeof(*Integerbuffer);
DYMOLA_STATIC SizeType* EndSizebuffer2=Sizebuffer+sizeof(Sizebuffer)/sizeof(*Sizebuffer);
DYMOLA_STATIC String* EndStringbuffer2=Stringbuffer+sizeof(Stringbuffer)/sizeof(*Stringbuffer);
DYMOLA_STATIC char* Endsimplestring2=simplestring+sizeof(simplestring)/sizeof(*simplestring);
#else
DYMOLA_STATIC Real* EndRealbuffer=Realbuffer+sizeof(Realbuffer)/sizeof(*Realbuffer);
DYMOLA_STATIC Integer* EndIntegerbuffer=Integerbuffer+sizeof(Integerbuffer)/sizeof(*Integerbuffer);
DYMOLA_STATIC SizeType* EndSizebuffer=Sizebuffer+sizeof(Sizebuffer)/sizeof(*Sizebuffer);
DYMOLA_STATIC String* EndStringbuffer=Stringbuffer+sizeof(Stringbuffer)/sizeof(*Stringbuffer);
DYMOLA_STATIC char* Endsimplestring=simplestring+sizeof(simplestring)/sizeof(*simplestring);
#endif
#else
DYMOLA_STATIC2 Real Realbuffer[];
DYMOLA_STATIC2 Integer Integerbuffer[];
DYMOLA_STATIC2 SizeType Sizebuffer[];
DYMOLA_STATIC2 String Stringbuffer[];
DYMOLA_STATIC2 char simplestring[];
DYMOLA_STATIC2 Real* EndRealbuffer;
DYMOLA_STATIC2 Integer* EndIntegerbuffer;
DYMOLA_STATIC2 SizeType* EndSizebuffer;
DYMOLA_STATIC2 String* EndStringbuffer;
DYMOLA_STATIC2 char* Endsimplestring;
DYMOLA_STATIC2 Real RealbufferNon[];
DYMOLA_STATIC2 Integer IntegerbufferNon[];
DYMOLA_STATIC2 SizeType SizebufferNon[];
DYMOLA_STATIC2 String StringbufferNon[];
DYMOLA_STATIC2 char simplestringNon[];
DYMOLA_STATIC2 Real* EndRealbufferNon;
DYMOLA_STATIC2 Integer* EndIntegerbufferNon;
DYMOLA_STATIC2 SizeType* EndSizebufferNon;
DYMOLA_STATIC2 String* EndStringbufferNon;
DYMOLA_STATIC2 char* EndsimplestringNon;
#endif
#ifndef __cplusplus
typedef struct BufferObject BufferObject;
typedef struct MarkObject MarkObject;
#endif
/* Memory allocation */
DYMOLA_STATIC MarkObject PushMark(void);
DYMOLA_STATIC void RePushMark(MarkObject*);
DYMOLA_STATIC void Release(); /* Release: used after each matrix assignment in the function */
DYMOLA_STATIC void PopMark(MarkObject oldMark);/* At the end of the function */
DYMOLA_STATIC void PopMarkMarks();/* Major pop, e.g. at end of intgegration */
DYMOLA_STATIC char* GetStringMark();
DYMOLA_STATIC void SetStringMark(char*);
DYMOLA_STATIC const char* SqueezeString(const char*s, char*startMark);
/* Data structure for arrays of arbitrary dimension, stored in Fortran Style. */
/* Adressing of individual elements start at 1 */
typedef struct IntegerArray {
SizeType ndims; /* Length of dims */
SizeType *dims; /* A.dims[i-1]=size(a,i) */
Integer *data;
} IntegerArray;
typedef struct RealArray {
SizeType ndims;
SizeType *dims;
Real *data;
} RealArray;
typedef struct StringArray {
SizeType ndims;
SizeType *dims;
String *data;
} StringArray;
/* Each argument is tagged by Subtag */
enum Subtag {
Colon, /* Corresponds to a[:] */
Range, /* Corresponds to a[f:t] (two Integer arguments)*/
RangeRange, /* Corresponds to a[f:s:t] (three Integer arguments) */
Vector, /* Corresponds to a[v] where v is IntegerArray (actually vector) (one IntegerArray argument) */
Index, /* Correspons to a[i] where i is an Integer (one Integer argument) */
EndMark /* End of the argument list */
};
/* Start of common routines */
DYMOLA_STATIC RealArray RealNonTemporary(SizeType ndims,...);
DYMOLA_STATIC IntegerArray IntegerNonTemporary(SizeType ndims,...);
DYMOLA_STATIC StringArray StringNonTemporary(SizeType ndims,...);
DYMOLA_STATIC SizeType FindIndex(SizeType ndims,SizeType*dims,va_list ap);
DYMOLA_STATIC SizeType *SizeTemp(SizeType r);
DYMOLA_STATIC RealArray RealMatch(const RealArray a);
DYMOLA_STATIC IntegerArray IntegerMatch(const IntegerArray a);
DYMOLA_STATIC StringArray StringMatch(const StringArray a);
/* Construct array from sizes, using variable number of arguments */
DYMOLA_STATIC RealArray RealTemporary(SizeType ndims,...);
/* Special cases for vectors and matrices (most often used) */
DYMOLA_STATIC RealArray RealTemporaryMatrix(SizeType r,SizeType c);
DYMOLA_STATIC RealArray RealTemporaryVector(SizeType r);
/* Assignment */
DYMOLA_STATIC void RealAssign(RealArray a,const RealArray b);
/* Indexing to set/get elements */
DYMOLA_STATIC void* RecordElement(const RealArray a,...);
DYMOLA_STATIC Real RealElement(const RealArray a,...);
/* Sizes */
DYMOLA_STATIC Integer RealSize(const RealArray a,SizeType i);
DYMOLA_STATIC IntegerArray RealSizes(const RealArray a);
/* Set element, note that val is prior to the index list. */
DYMOLA_STATIC void SetRealElement(Real val,RealArray a,...);
/* For debug write out */
DYMOLA_STATIC void RealWrite(const RealArray a);
/* Construct Arrays from other arrays */
DYMOLA_STATIC RealArray RealArrayArray(SizeType narg,RealArray first,...);
/* Construct arrays from scalars */
DYMOLA_STATIC RealArray RealScalarArray(SizeType narg,...);
/* Construct from Record Arrays */
DYMOLA_STATIC RealArray RecordArrayRealSlice(const RealArray a,size_t off);
DYMOLA_STATIC RealArray RecordArrayRealArraySlice(const RealArray a,size_t off);
/* Concatenate along dimensions 'along' (1..ndims) and given nargs arguments */
DYMOLA_STATIC RealArray RealCat(SizeType along,SizeType nargs,RealArray first,...);
/* The helper in Modelica */
DYMOLA_STATIC RealArray RealPromote(const RealArray a,SizeType n);
DYMOLA_STATIC RealArray RealPromoteScalar(const Real x,SizeType n);
/* Use as: RealGetSub(a,Colon,Index,3,Range,3,4,EndMark)=a[:,3,3:4] */
DYMOLA_STATIC RealArray RealGetSub(const RealArray a,...);
DYMOLA_STATIC void RealPutSub(const RealArray a,RealArray out,...);
/* For each operation Op(...) we create a temporary result res */
/* and call OpAssign(res,...) */
/* The operations Op(...) are convenient and used in the code */
/* Fill */
DYMOLA_STATIC RealArray RealFillAssign(RealArray res,const Real t);
DYMOLA_STATIC RealArray RealFill(const Real t,SizeType ndims,...);
DYMOLA_STATIC RealArray RealFillArray(const RealArray a,SizeType ndims,...);
DYMOLA_STATIC Real Realscalar(const RealArray a);
/* Matrix operations not limited to numeric matrices */
DYMOLA_STATIC RealArray Realvector(const RealArray a);
DYMOLA_STATIC RealArray Realmatrix(const RealArray a);
DYMOLA_STATIC RealArray Realtranspose(const RealArray a);
DYMOLA_STATIC RealArray Realouterproduct(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray Realsymmetric(const RealArray a);
/* End of common routines for String*/
/* Basic operations, add, subtract, scale*/
/* For each operation Op(...) we create a temporary result res */
/* and call OpAssign(res,...) */
/* The operations Op(...) are convenient and used in the code */
DYMOLA_STATIC RealArray RealAddAssign(RealArray res,const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealAdd(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealSubtractAssign(RealArray res,const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealSubtract(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealScaleAssign(RealArray res,const RealArray a,const Real t);
DYMOLA_STATIC RealArray RealScale(const RealArray a,const Real t);
DYMOLA_STATIC RealArray RealMinusAssign(RealArray res,const RealArray a);
DYMOLA_STATIC RealArray RealMinus(const RealArray a);
/* sum, max, min, and product */
DYMOLA_STATIC Real Realsum(const RealArray a);
DYMOLA_STATIC Real RealAbssum(const RealArray a);
DYMOLA_STATIC Real RealAbssumDiff(const RealArray a,const RealArray b);
DYMOLA_STATIC Real Realmax(const RealArray a);
DYMOLA_STATIC Real Realmin(const RealArray a);
DYMOLA_STATIC Real Realproduct(const RealArray a);
DYMOLA_STATIC RealArray Realdiagonal(const RealArray a);
DYMOLA_STATIC RealArray Realcross(const RealArray x,const RealArray y);
DYMOLA_STATIC RealArray Realskew(const RealArray x);
/* Multiply */
DYMOLA_STATIC RealArray RealMultiplyMMAssign(RealArray res,
const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealMultiplyMVAssign(RealArray res,const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealMultiplyVMAssign(RealArray res,const RealArray a,const RealArray b);
DYMOLA_STATIC Real RealMultiplyVV(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealMultiplyMM(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealMultiplyMV(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealMultiplyVM(const RealArray a,const RealArray b);
DYMOLA_STATIC RealArray RealIdentity(SizeType n);
DYMOLA_STATIC RealArray RealPow(const RealArray a,const Integer n);
/* For from:stride:to */
DYMOLA_STATIC RealArray RealRange(const Real from,const Real to,const Real stride);
/* End of common routines with Integer */
/* Unique routines */
DYMOLA_STATIC RealArray linspace(const Real from,const Real to,const Integer n);
/* Go from IntegerArray to RealArray */
DYMOLA_STATIC RealArray RealConvertInteger(const IntegerArray a);
DYMOLA_STATIC IntegerArray IntegerConvertReal(const RealArray a);
DYMOLA_STATIC RealArray RealScaleDiv(const RealArray a,const Real t);
/* Routines for Integer */
/* Create Array-temporaries given the dimensions */
/* Construct array from sizes, using variable number of arguments */
DYMOLA_STATIC IntegerArray IntegerTemporary(SizeType ndims,...);
/* Special cases for vectors and matrices (most often used) */
DYMOLA_STATIC IntegerArray IntegerTemporaryMatrix(SizeType r,SizeType c);
DYMOLA_STATIC IntegerArray IntegerTemporaryVector(SizeType r);
/* Assignment */
DYMOLA_STATIC void IntegerAssign(IntegerArray a,const IntegerArray b);
/* Indexing to set/get elements */
DYMOLA_STATIC Integer IntegerElement(const IntegerArray a,...);
/* Sizes */
DYMOLA_STATIC Integer IntegerSize(const IntegerArray a,SizeType i);
DYMOLA_STATIC IntegerArray IntegerSizes(const IntegerArray a);
/* Set element, note that val is prior to the index list. */
DYMOLA_STATIC void SetIntegerElement(Integer val,IntegerArray a,...);
/* For debug write out */
DYMOLA_STATIC void IntegerWrite(const IntegerArray a);
/* Construct Arrays from other arrays */
DYMOLA_STATIC IntegerArray IntegerArrayArray(SizeType narg,IntegerArray first,...);
/* Construct arrays from scalars */
DYMOLA_STATIC IntegerArray IntegerScalarArray(SizeType narg,...);
/* Construct from Record Arrays */
DYMOLA_STATIC IntegerArray RecordArrayIntegerSlice(const RealArray a,size_t off);
DYMOLA_STATIC IntegerArray RecordArrayIntegerArraySlice(const RealArray a,size_t off);
/* Concatenate along dimensions 'along' (1..ndims) and given nargs arguments */
DYMOLA_STATIC IntegerArray IntegerCat(SizeType along,SizeType nargs,IntegerArray first,...);
/* The helper in Modelica */
DYMOLA_STATIC IntegerArray IntegerPromote(const IntegerArray a,SizeType n);
DYMOLA_STATIC IntegerArray IntegerPromoteScalar(const Integer x,SizeType n);
/* Use as: IntegerGetSub(a,Colon,Index,3,Range,3,4,EndMark)=a[:,3,3:4] */
DYMOLA_STATIC IntegerArray IntegerGetSub(const IntegerArray a,...);
/* Use as IntegerPutSub(a, out,Index,3,Colon,Range,3,4) yields out[3,:,3:4]=a; */
DYMOLA_STATIC void IntegerPutSub(const IntegerArray a,IntegerArray out,...);
/* For each operation Op(...) we create a temporary result res */
/* and call OpAssign(res,...) */
/* The operations Op(...) are convenient and used in the code */
/* Fill */
DYMOLA_STATIC IntegerArray IntegerFillAssign(IntegerArray res,const Integer t);
DYMOLA_STATIC IntegerArray IntegerFill(const Integer t,SizeType ndims,...);
DYMOLA_STATIC IntegerArray IntegerFillArray(const IntegerArray a,SizeType ndims,...);
DYMOLA_STATIC Integer Integerscalar(const IntegerArray a);
/* Matrix operations not limited to numeric matrices */
DYMOLA_STATIC IntegerArray Integervector(const IntegerArray a);
DYMOLA_STATIC IntegerArray Integermatrix(const IntegerArray a);
DYMOLA_STATIC IntegerArray Integertranspose(const IntegerArray a);
DYMOLA_STATIC IntegerArray Integerouterproduct(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray Integersymmetric(const IntegerArray a);
/* End of common routines for String*/
/* Basic operations, add, subtract, scale*/
/* For each operation Op(...) we create a temporary result res */
/* and call OpAssign(res,...) */
/* The operations Op(...) are convenient and used in the code */
DYMOLA_STATIC IntegerArray IntegerAddAssign(IntegerArray res,const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerAdd(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerSubtractAssign(IntegerArray res,const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerSubtract(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerScaleAssign(IntegerArray res,const IntegerArray a,const Integer t);
DYMOLA_STATIC IntegerArray IntegerScale(const IntegerArray a,const Integer t);
DYMOLA_STATIC IntegerArray IntegerMinusAssign(IntegerArray res,const IntegerArray a);
DYMOLA_STATIC IntegerArray IntegerMinus(const IntegerArray a);
/* sum, max, min, and product */
DYMOLA_STATIC Integer Integersum(const IntegerArray a);
DYMOLA_STATIC Integer IntegerAbssum(const IntegerArray a);
DYMOLA_STATIC Integer Integermax(const IntegerArray a);
DYMOLA_STATIC Integer Integermin(const IntegerArray a);
DYMOLA_STATIC Integer Integerproduct(const IntegerArray a);
DYMOLA_STATIC IntegerArray Integerdiagonal(const IntegerArray a);
DYMOLA_STATIC IntegerArray Integercross(const IntegerArray x,const IntegerArray y);
DYMOLA_STATIC IntegerArray Integerskew(const IntegerArray x);
/* Multiply */
DYMOLA_STATIC IntegerArray IntegerMultiplyMMAssign(IntegerArray res,
const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerMultiplyMVAssign(IntegerArray res,const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerMultiplyVMAssign(IntegerArray res,const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC Integer IntegerMultiplyVV(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerMultiplyMM(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerMultiplyMV(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerMultiplyVM(const IntegerArray a,const IntegerArray b);
DYMOLA_STATIC IntegerArray IntegerIdentity(SizeType n);
DYMOLA_STATIC IntegerArray IntegerPow(const IntegerArray a,const Integer n);
/* For from:stride:to */
DYMOLA_STATIC IntegerArray IntegerRange(const Integer from,const Integer to,const Integer stride);
/* Create Array-temporaries given the dimensions */
/* Construct array from sizes, using variable number of arguments */
DYMOLA_STATIC StringArray StringTemporary(SizeType ndims,...);
/* Special cases for vectors and matrices (most often used) */
DYMOLA_STATIC StringArray StringTemporaryMatrix(SizeType r,SizeType c);
DYMOLA_STATIC StringArray StringTemporaryVector(SizeType r);
/* Assignment */
DYMOLA_STATIC void StringAssign(StringArray a,const StringArray b);
/* Indexing to set/get elements */
DYMOLA_STATIC const char* StringElement(const StringArray a,...);
/* Sizes */
DYMOLA_STATIC Integer StringSize(const StringArray a,SizeType i);
DYMOLA_STATIC IntegerArray StringSizes(const StringArray a);
/* Set element, note that val is prior to the index list. */
DYMOLA_STATIC void SetStringElement(const char* val,StringArray a,...);
/* For debug write out */
DYMOLA_STATIC void StringWrite(const StringArray a);
/* Construct Arrays from other arrays */
DYMOLA_STATIC StringArray StringArrayArray(SizeType narg,StringArray first,...);
/* Construct arrays from scalars */
DYMOLA_STATIC StringArray StringScalarArray(SizeType narg,...);
/* Construct from Record Arrays */
DYMOLA_STATIC StringArray RecordArrayStringSlice(const RealArray a,size_t off);
DYMOLA_STATIC StringArray RecordArrayStringArraySlice(const RealArray a,size_t off);
/* Concatenate along dimensions 'along' (1..ndims) and given nargs arguments */
DYMOLA_STATIC StringArray StringCat(SizeType along,SizeType nargs,StringArray first,...);
/* The helper in Modelica */
DYMOLA_STATIC StringArray StringPromote(const StringArray a,SizeType n);
DYMOLA_STATIC StringArray StringPromoteScalar(const char* x,SizeType n);
/* Actual put of submatrix */
/* Use as: StringGetSub(a,Colon,Index,3,Range,3,4,EndMark)=a[:,3,3:4] */
DYMOLA_STATIC StringArray StringGetSub(const StringArray a,...);
/* Use as StringPutSub(a, out,Index,3,Colon,Range,3,4) yields out[3,:,3:4]=a; */
DYMOLA_STATIC void StringPutSub(const StringArray a,StringArray out,...);
DYMOLA_STATIC StringArray StringCopy(const StringArray a);
/* For each operation Op(...) we create a temporary result res */
/* and call OpAssign(res,...) */
/* The operations Op(...) are convenient and used in the code */
/* Fill */
DYMOLA_STATIC StringArray StringFillAssign(StringArray res,const char* t);
DYMOLA_STATIC StringArray StringFill(const char* t,SizeType ndims,...);
DYMOLA_STATIC StringArray StringFillArray(const StringArray a,SizeType ndims,...);
DYMOLA_STATIC const char* Stringscalar(const StringArray a);
/* Matrix operations not limited to numeric matrices */
DYMOLA_STATIC StringArray Stringvector(const StringArray a);
DYMOLA_STATIC StringArray Stringmatrix(const StringArray a);
DYMOLA_STATIC StringArray Stringtranspose(const StringArray a);
DYMOLA_STATIC StringArray Stringsymmetric(const StringArray a);
/* End of common routines for String*/
static IntegerArray identity(Integer n) {return IntegerIdentity(n);}
static Integer real2integer(Real x) {return (Integer)(floor(x));}
DYMOLA_STATIC Integer VectorWhenHandle(IntegerArray cond,IntegerArray eval,IntegerArray evalnew,Integer Event,Integer*AnyEvent);
DYMOLA_STATIC SizeType CommonNrElements(SizeType ndims, SizeType* dims);
DYMOLA_STATIC SizeType RealNrElements(const RealArray a);
DYMOLA_STATIC SizeType IntegerNrElements(const IntegerArray);
DYMOLA_STATIC SizeType StringNrElements(const StringArray);
/* Convert between C and Fortran storage */
DYMOLA_STATIC RealArray RealSwitchMajor(const RealArray);
DYMOLA_STATIC IntegerArray IntegerSwitchMajor(const IntegerArray);
DYMOLA_STATIC StringArray StringSwitchMajor(const StringArray);
DYMOLA_STATIC void RealSwitchMajorAssign(RealArray,const RealArray);
DYMOLA_STATIC void IntegerSwitchMajorAssign(IntegerArray,const IntegerArray);
DYMOLA_STATIC void StringSwitchMajorAssign(StringArray,const StringArray);
DYMOLA_STATIC RealArray readMatrix(const char*fil,const char*matnam,Integer m,Integer n);
DYMOLA_STATIC IntegerArray readMatrixSize(const char*fil,const char*matnam);
DYMOLA_STATIC StringArray readMATmatrices_M(const char*fil);
DYMOLA_STATIC Integer writeMatrix(const char*fil,const char*matnam,const RealArray mat,int append);
DYMOLA_STATIC Real RealReadScalar(const char*fil,const char*matnam);
DYMOLA_STATIC RealArray RealReadArray(const char*fil,const char*matnam,Integer ndims);
DYMOLA_STATIC Integer IntegerReadScalar(const char*fil,const char*matnam);
DYMOLA_STATIC IntegerArray IntegerReadArray(const char*fil,const char*matnam,Integer ndims);
DYMOLA_STATIC String StringReadScalar(const char*fil,const char*matnam);
DYMOLA_STATIC StringArray StringReadArray(const char*fil,const char*matnam,Integer ndims);
DYMOLA_STATIC RealArray RealLeading(int,int,RealArray);
DYMOLA_STATIC IntegerArray IntegerLeading(int,int,IntegerArray);
DYMOLA_STATIC StringArray StringLeading(int,int,StringArray);
DYMOLA_STATIC void writeArrays(const char*fileName,int nargs,...);
DYMOLA_STATIC char* StringAllocate(SizeType len);
DYMOLA_STATIC String StringAdd(String a,String b);
DYMOLA_STATIC String Real2String(Real x,Integer minwidth,Integer precision);
DYMOLA_STATIC String Integer2String(Integer x,Integer minwidth,Integer precision);
DYMOLA_STATIC String Real2String3(Real x,Integer leftJustified,Integer minwidth,Integer precision);
DYMOLA_STATIC String Real2String2(Real x,Integer leftJustified,Integer minwidth);
DYMOLA_STATIC String Integer2String2(Integer x,Integer leftJustified,Integer minwidth);
DYMOLA_STATIC String Boolean2String2(Integer x,Integer leftJustified,Integer minwidth);
DYMOLA_STATIC String Real2StringFormat(Real x,String s);
DYMOLA_STATIC void CopySlice(Real*x,RealArray y);
DYMOLA_STATIC RealArray RealTemporaryDense(Real*d,SizeType ndims,...);
DYMOLA_STATIC IntegerArray IntegerTemporaryDense(Real*d,SizeType ndims,...);
DYMOLA_STATIC IntegerArray IntegerTemporaryDenseOrig(Integer*d,SizeType ndims,...);
DYMOLA_STATIC StringArray StringTemporaryDense(String*d,SizeType ndims,...);
DYMOLA_STATIC RealArray RealInverse(const RealArray A);
DYMOLA_STATIC IntegerArray NOTArray(const IntegerArray);
DYMOLA_STATIC IntegerArray ANDArray(const IntegerArray,const IntegerArray);
DYMOLA_STATIC IntegerArray ORArray(const IntegerArray,const IntegerArray);
DYMOLA_STATIC String ConvertToNonTempString(String s);
DYMOLA_STATIC StringArray ConvertToNonTempStringArray(StringArray s);
LIBDS_API int dymli2_(int* sysnr, int *fact, double *a, int* lda, int *n,double* b, double *Time, int *Event, int* PrintEvent, double* dwork, int* iwork, int *factor, int* ierr);
#endif
|
key_rand_hash.h | /*
* (C) copyright 2011, Ismael Garcia, (U.Girona/ViRVIG, Spain & INRIA/ALICE, France)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef KEY_RAND_HASH_H_
#define KEY_RAND_HASH_H_
#include <libhu/hash_utils.h>
// ------------------------------------------------------------------
#if (ENABLE_DEVICE_OMP_COMPUTING)
__constant__ unsigned int offsets_k_rand[16] = OFFSETS_TABLE_16;
#elif (ENABLE_DEVICE_CUDA_COMPUTING)
__constant__ unsigned int offsets_k_rand[16] = OFFSETS_TABLE_16;
#elif (ENABLE_DEVICE_OPENCL_COMPUTING)
__constant__ unsigned int offsets_k_rand[16] = OFFSETS_TABLE_16;
#endif
class key_rand_hash_functor : libhu::key_hash_functor
{
public:
key_rand_hash_functor()
{
}
typedef libhu::U32 T_KEY;
typedef libhu::U32 T_AGE;
typedef libhu::U32 T_MAX_AGE;
typedef libhu::U32 T_LOC;
typedef libhu::U32 T_HASH_TABLE;
static const libhu::U32 DEFAULT_GROUP_SIZE;
static const libhu::U32 KEY_TYPE_BITS;
static const libhu::U32 KEY_TYPE_MASK;
static const libhu::U32 PACKED_KEY_TYPE_MASK;
static const libhu::U32 KEY_TYPE_RANGE;
static const libhu::U32 UNDEFINED_KEY;
static const libhu::U32 PACKED_UNDEFINED_KEY;
static const libhu::U32 KEY_TYPE_AGE_MASK;
static const libhu::U32 KEY_TYPE_AGE_BITS;
static const libhu::U32 KEY_TYPE_INIT_AGE;
static const libhu::U32 KEY_TYPE_NULL_AGE;
static const libhu::U32 KEY_TYPE_MAX_AGE;
static const libhu::U32 KEY_TYPE_MAX_AGE_MASK;
static const libhu::U32 KEY_TYPE_MAX_AGE_BITS;
static const libhu::U32 HTABLE_ID;
static const libhu::U32 NOP_MODE_TRUE;
static const libhu::U32 NOP_MODE_FALSE;
libhu::UPTR hash_tableUPtr;
libhu::U32* max_tableUPtr;
libhu::U32 hash_table_size;
__inline__ HOST DEVICE T_LOC WRAP(T_LOC A, T_LOC B) { return ((A) % (B)); }
__inline__ HOST DEVICE T_KEY GET_KEY_POS(T_HASH_TABLE k) { return ((k) & KEY_TYPE_MASK); }
__inline__ HOST DEVICE T_KEY GET_KEY_ATTACH_ID(T_HASH_TABLE k) { return ((k) & KEY_TYPE_MASK); }
__inline__ HOST DEVICE T_AGE GET_KEY_AGE(T_HASH_TABLE k) { return ((k) >> KEY_TYPE_BITS); }
__inline__ HOST DEVICE T_MAX_AGE GET_KEY_MAX_AGE(T_HASH_TABLE k) { return ((k) >> KEY_TYPE_BITS); }
__inline__ HOST DEVICE T_HASH_TABLE PACK_KEY_POS(T_KEY p) { return ((p) & KEY_TYPE_MASK); }
__inline__ HOST DEVICE T_HASH_TABLE PACK_KEY_POS_AND_AGE(T_KEY p, T_AGE a) { return (((a << KEY_TYPE_BITS)) + (p & KEY_TYPE_MASK)); }
__inline__ HOST DEVICE T_HASH_TABLE PACK_KEY_POS_AND_MAX_AGE(T_KEY p, T_MAX_AGE m) { return (((m << KEY_TYPE_BITS)) + (p & KEY_TYPE_MASK)); }
// Hash function
__inline__ HOST DEVICE T_LOC h(const T_KEY K, const T_AGE AGE, const libhu::U32 HSZ)
{
#if (ENABLE_HOST_COMPUTING)
static unsigned int offsets_k_rand[16] = OFFSETS_TABLE_16;
#endif
return WRAP((K + (AGE + 1u) * (K * 101449u) + (offsets_k_rand[AGE] * 679681u)), HSZ);
}
// Max. age operator to update hash_table
__inline__ HOST DEVICE
void operator()(T_HASH_TABLE& t)
{
libhu::U32 i = (((libhu::UPTR)thrust::raw_pointer_cast(&t)) - ((libhu::UPTR)hash_tableUPtr)) / (sizeof(T_HASH_TABLE));
if (t != PACKED_UNDEFINED_KEY)
{
t = PACK_KEY_POS_AND_MAX_AGE(GET_KEY_POS(t), max_tableUPtr[i]);
}
}
template<typename T_KEY,
typename T_HASH_TABLE,
typename T_MAX_AGE,
typename T_HASH_FUNCTOR,
typename T_MAX_AGE_COMPUTATION_FUNCTOR>
__inline__ HOST DEVICE
void hash_kernel(libhu::U32 keys_size,
libhu::U32 hash_table_size,
T_KEY keys[],
T_HASH_TABLE hash_table[],
T_MAX_AGE max_table[],
T_HASH_FUNCTOR hf,
T_MAX_AGE_COMPUTATION_FUNCTOR maf)
{
#if (ENABLE_DEVICE_OMP_COMPUTING)
#if (OMP_CUSTOM_OPTIONS_ON_RUNTIME)
omp_set_num_threads(OMP_CUSTOM_NUM_THREADS);
omp_set_schedule(omp_sched_dynamic,OMP_CUSTOM_CHUNK_SIZE);
#pragma omp parallel for num_threads(OMP_CUSTOM_NUM_THREADS),schedule(runtime)
for (libhu::S32 GTID = 0; GTID < keys_size; GTID++)
#else
#pragma omp parallel for num_threads(OMP_CUSTOM_NUM_THREADS),schedule(dynamic, OMP_CUSTOM_CHUNK_SIZE)
for (libhu::S32 GTID = 0; GTID < keys_size; GTID++)
#endif
#endif
#if (ENABLE_HOST_COMPUTING)
for (libhu::S32 GTID = 0; GTID < keys_size; GTID++)
#endif
{
libhu::U32 LOC;
libhu::U32 ROOT_LOC;
libhu::U8 AGE = KEY_TYPE_NULL_AGE;
T_HASH_TABLE EVICTED_PKEY;
T_HASH_TABLE PKEY = (GTID < keys_size) ? PACK_KEY_POS_AND_MAX_AGE(keys[ (GTID) ], KEY_TYPE_INIT_AGE) : PACKED_UNDEFINED_KEY;
AGE = (GTID < keys_size) ? KEY_TYPE_NULL_AGE : KEY_TYPE_MAX_AGE;
while (AGE < KEY_TYPE_MAX_AGE)
{
LOC = h(GET_KEY_POS(PKEY), AGE, hash_table_size);
EVICTED_PKEY = libhu::atomicMaxU32(&hash_table[ (LOC) ], PKEY );
if (EVICTED_PKEY < PKEY)
{
maf.update_max_age(hash_table_size, PKEY, AGE, max_table, hf);
if (GET_KEY_AGE(EVICTED_PKEY) > 0u)
{
PKEY = EVICTED_PKEY;
AGE = GET_KEY_AGE(EVICTED_PKEY);
}
else
{
break;
}
}
else
{
AGE++;
PKEY = PACK_KEY_POS_AND_MAX_AGE(GET_KEY_POS(PKEY), AGE);
}
}
}
}
};
const libhu::U32 key_rand_hash_functor::DEFAULT_GROUP_SIZE = 192u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_BITS = 28u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_MASK = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_rand_hash_functor::PACKED_KEY_TYPE_MASK = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_rand_hash_functor::KEY_TYPE_RANGE = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 2u );
const libhu::U32 key_rand_hash_functor::UNDEFINED_KEY = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_rand_hash_functor::PACKED_UNDEFINED_KEY = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_rand_hash_functor::KEY_TYPE_AGE_MASK = 15u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_AGE_BITS = 4u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_INIT_AGE = 1u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_NULL_AGE = 0u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_MAX_AGE = 16u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_MAX_AGE_MASK = 4u;
const libhu::U32 key_rand_hash_functor::KEY_TYPE_MAX_AGE_BITS = 4u;
const libhu::U32 key_rand_hash_functor::HTABLE_ID = 0u;
const libhu::U32 key_rand_hash_functor::NOP_MODE_TRUE = 255u;
const libhu::U32 key_rand_hash_functor::NOP_MODE_FALSE = 0u;
#endif
|
attr-simd-2.c | /* { dg-do compile } */
/* { dg-options "-fdump-tree-optimized -fopenmp-simd" } */
#pragma omp declare simd
extern
#ifdef __cplusplus
"C"
#endif
__attribute__((__simd__))
int simd_attr (void)
{
return 0;
}
/* { dg-final { scan-tree-dump "omp declare simd" "optimized" } } */
/* { dg-final { scan-assembler-times "_ZGVbN4_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVbM4_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVcN4_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVcM4_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVdN8_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVdM8_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVeN16_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
/* { dg-final { scan-assembler-times "_ZGVeM16_simd_attr:" 1 { target { i?86-*-* x86_64-*-* } } } } */
|
cg_precond_jacobi.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include "nb/memory_bot.h"
#include "nb/solver_bot/sparse/sparse.h"
#include "nb/solver_bot/sparse/solvers/cg_precond_jacobi.h"
#include "../sparse_struct.h"
int nb_sparse_solve_CG_precond_Jacobi
(const nb_sparse_t *const A,
const double *const b,
double *_x, /* Out */
uint32_t max_iter, double tolerance,
uint32_t* niter_performed, /* Out (NULL if not required) */
double* tolerance_reached, /* Out (NULL if not required) */
uint32_t omp_parallel_threads)
/* Return the num of iterations */
{
/* Solve Ax = b with Conjugate Gradient preconditioned with jacobi */
uint32_t vec_size = A->N * sizeof(double);
char *memblock = nb_allocate_zero_mem(5 * vec_size);
double* g = (void*) memblock;
double* p = (void*) (memblock + vec_size);
double* q = (void*) (memblock + 2 * vec_size);
double* w = (void*) (memblock + 3 * vec_size);
double* Aii = (void*) (memblock + 4 * vec_size);
double dot_gg = 0;
#pragma omp parallel for reduction(+:dot_gg) num_threads(omp_parallel_threads) schedule(guided)
for (uint32_t i = 0; i < A->N; i++) {
double sum = 0;
for(uint32_t j=0; j< A->rows_size[i]; j++)
sum += A->rows_values[i][j] * _x[A->rows_index[i][j]];
g[i] = sum - b[i];
Aii[i] = nb_sparse_get(A,i,i);
q[i] = g[i]/Aii[i];
p[i] = -q[i];
dot_gg += g[i]*g[i];
}
uint32_t k = 0;
while (dot_gg > tolerance * tolerance && k < max_iter) {
double dot_pw = 0;
double dot_gq = 0;
dot_gg = 0;
#pragma omp parallel for reduction(+:dot_pw, dot_gg, dot_gq) num_threads(omp_parallel_threads)
for (uint32_t i = 0; i < A->N; i++) {
w[i] = 0;
for (uint32_t j = 0; j < A->rows_size[i]; j++)
w[i] += A->rows_values[i][j] * p[A->rows_index[i][j]];
dot_pw += p[i]*w[i];
dot_gg += g[i]*g[i];
dot_gq += g[i]*q[i];
}
double alphak = dot_gq/dot_pw;
double dot_gkqk = 0;
#pragma omp parallel for reduction(+:dot_gkqk) num_threads(omp_parallel_threads)
for (uint32_t i=0; i< A->N; i++) {
_x[i] += alphak*p[i];
g[i] += alphak*w[i];
q[i] = g[i]/Aii[i];
dot_gkqk += g[i]*q[i];
}
double betak = dot_gkqk/dot_gq;
#pragma omp parallel for num_threads(omp_parallel_threads)
for (uint32_t i=0; i< A->N; i++)
p[i] = -q[i]+betak*p[i];
k++;
}
/* Free memory */
nb_free_mem(memblock);
if (NULL != niter_performed)
niter_performed[0]= k;
if (NULL != tolerance_reached)
*tolerance_reached = sqrt(dot_gg);
if (dot_gg > tolerance*tolerance)
return 1;
return 0;
}
|
critical-unrelated.c | /*
* critical-unrelated.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
int var = 0;
#pragma omp parallel num_threads(8) shared(var)
{
#pragma omp critical
{
// Dummy region.
}
var++;
}
fprintf(stderr, "DONE\n");
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}critical-unrelated.c:29
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}critical-unrelated.c:29
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
compute.h | inline void compute(float* a, int size)
{
#pragma omp parallel for
for (int i = 0; i < size; i++)
a[i] *= 2.0f;
}
|
MeshGraph.h | /// \ingroup base
/// \class ttk::MeshGraph
/// \author Jonas Lukasczyk (jl@jluk.de)
/// \date 01.12.2018
///
/// \brief TTK %meshGraph processing package.
///
/// %MeshGraph is a TTK processing package that generates for each one dimensional cell (edge) a two dimensional cell by mapping a size value to the width of the input cell. The output is a set of either quadratic cells or linear polygons.
///
/// This filter supports two modes:
///
/// 1) Each cell (a,b) is mapped to two quadric quads:
///
/// a-------------------b
///
/// a1--m1a1--m1--b1m1--b1
/// | | |
/// a c b
/// | | |
/// a0--a0m0--m0--m0b0--b0
///
/// 2) Each cell (a,b) is subdivided into a linear polygon
///
/// a----------------------b
///
/// a1--s0u--s1u-- ... b1
/// | |
/// a0--s0d--s1d-- ... b0
///
#pragma once
// base code includes
#include <Wrapper.h>
using namespace std;
namespace ttk{
class MeshGraph : public Debug{
public:
MeshGraph(){};
~MeshGraph(){};
inline size_t computeNumberOfOutputPoints(
const size_t& nInputPoints,
const size_t& nInputCells,
const bool& useQuadraticCells,
const size_t& nSubdivisions=0
) const{
return useQuadraticCells
? nInputPoints*3 + nInputCells*7 // 3 per input point (a -> a,a0,a1) + 7 per input cell (a0m0,m0,m0b0,b1m1,m1,m1a1,c)
: nInputPoints*2 + nInputCells*(nSubdivisions*2); // 2 per input point (a -> a0,a1) + 2 per cell subdivision (sIu+sId)
};
inline size_t computeNumberOfOutputCells(
const size_t& nInputCells,
const bool& useQuadraticCells
) const{
return useQuadraticCells
? nInputCells*2 // each cell gets converted into two quadratic quads
: nInputCells; // each cell gets converted into a one cell with multiple points
};
inline size_t computeOutputCellSize(
const size_t& nSubdivisions
) const{
return 5+nSubdivisions*2; // cellDim + 4 corners + 2 for each subdivision
};
inline size_t computeOutputTopologySize(
const size_t& nInputCells,
const bool& useQuadraticCells,
const size_t& nSubdivisions=0
) const{
return useQuadraticCells
? nInputCells*18 // 2*cellDim + 8 corners (a0,m0,m1,a1, m0,b0,b1,m1) + 8 mid-edge nodes (a0m0,c,m1a1,a, m0b0,b,b1m1,c)
: nInputCells*this->computeOutputCellSize( nSubdivisions );
};
// Mesh graph with quadratic quads
template <typename topoType, typename sizeType> int execute(
// Input
const float* inputPoints,
const topoType* inputTopology,
const size_t& nInputPoints,
const size_t& nInputCells,
const sizeType* inputPointSizes,
const float& sizeScale,
const size_t& sizeAxis,
// Output
float* outputPoints,
topoType* outputTopology
) const;
// Mesh graph with linear polygon
template <typename topoType, typename sizeType> int execute2(
// Input
const float* inputPoints,
const topoType* inputTopology,
const size_t nInputPoints,
const size_t nInputCells,
const size_t nSubdivisions,
const sizeType* inputPointSizes,
const float sizeScale,
const size_t sizeAxis,
// Output
float* outputPoints,
topoType* outputTopology
) const;
// Map input point data to output point data
template <typename topoType, typename dataType> int mapInputPointDataToOutputPointData(
const topoType* inputTopology,
const size_t& nInputPoints,
const size_t& nInputCells,
const dataType* inputPointData,
dataType* outputPointData,
const bool& useQuadraticCells,
const size_t& nSubdivisions=0
) const;
// Map input point data to output point data
template <typename topoType, typename dataType> int mapInputCellDataToOutputCellData(
const size_t& nInputCells,
const dataType* inputCellData,
dataType* outputCellData,
const bool& useQuadraticCells,
const size_t& nSubdivisions=0
) const;
};
}
// =============================================================================
// Version 1: Mesh Graph with Quadratic Quads
// =============================================================================
template <typename topoType, typename sizeType> int ttk::MeshGraph::execute(
// Input
const float* inputPoints,
const topoType* inputTopology,
const size_t& nInputPoints,
const size_t& nInputCells,
const sizeType* inputPointSizes,
const float& sizeScale,
const size_t& sizeAxis,
// Output
float* outputPoints,
topoType* outputTopology
) const{
Timer t;
double t0=0;
// Print Input
{
stringstream msg;
msg << "[ttkMeshGraph] Computing quadratic quads for graph with" << endl
<< "[ttkMeshGraph] - "<< nInputPoints << " points" << endl
<< "[ttkMeshGraph] - "<< nInputCells << " edges" << endl;
dMsg(cout, msg.str(), infoMsg);
}
auto getInputPointData =
[](
const size_t& pointIndex,
const float* inputPoints,
const sizeType* inputPointSizes,
const float& sizeScale,
float data[4]
) {
size_t i= pointIndex*3;
data[0] = inputPoints[i++];
data[1] = inputPoints[i++];
data[2] = inputPoints[i];
data[3] = (inputPointSizes!=nullptr)?((float) inputPointSizes[pointIndex]) * sizeScale:sizeScale;
};
// -------------------------------------------------------------------------
// Compute Output Point Locations
// -------------------------------------------------------------------------
// outputPoints: [
// 3 per input point (a,a0,a1,b,b0,b1,...),
// 7 per input cell (m0,m1,a0m0,m0b0,b1m1,m1a1,c...)
// ]
size_t edgePointOffset = 3*nInputPoints;
{
dMsg(cout, "[ttkMeshGraph] Computing output points ... ", timeMsg);
t0 = t.getElapsedTime();
// ---------------------------------------------------------------------
// Compute points that result from input points
// ---------------------------------------------------------------------
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputPoints; i++){
float data[4];
getInputPointData(i, inputPoints, inputPointSizes, sizeScale, data);
size_t q = i*9; // i*3*3
// a
outputPoints[q ] = data[0];
outputPoints[q+1] = data[1];
outputPoints[q+2] = data[2];
// a0
outputPoints[q+3] = data[0];
outputPoints[q+4] = data[1];
outputPoints[q+5] = data[2];
// a1
outputPoints[q+6] = data[0];
outputPoints[q+7] = data[1];
outputPoints[q+8] = data[2];
// Move a0 and a1 along size axis
outputPoints[q+3+sizeAxis] += data[3]/2;
outputPoints[q+6+sizeAxis] -= data[3]/2;
}
// ---------------------------------------------------------------------
// Compute points that result from edges
// ---------------------------------------------------------------------
// Lambda function that linearly interpolates two point locations
auto getMidPoint = [](const size_t& m, const size_t& i, const size_t& j, float* outputPoints){
size_t mp = m*3;
size_t ip = i*3;
size_t jp = j*3;
for(size_t i=0; i<3; i++)
outputPoints[ mp+i ] = (outputPoints[ ip+i ] + outputPoints[ jp+i ])/2;
};
// Lambda function that computes the output location of a mid point on a bezier curve
auto getMidPoint2 = [](const size_t& m, const size_t& p0, const size_t& p1, const size_t& sizeAxis, float* outputPoints){
auto bezierPoint = [](float& m, const float& p0, const float& p2){
m = 0.5*(0.5*p0+0.5*m)+0.5*(0.5*m+0.5*p2);
};
size_t mi = m*3;
size_t p0i = p0*3; // first point
size_t p1i = p1*3; // second point
for(size_t i=0; i<3; i++)
outputPoints[ mi+i ] = (outputPoints[ p0i+i ] + outputPoints[ p1i+i ])/2;
outputPoints[ mi+sizeAxis ] = outputPoints[ p0i+sizeAxis ];
for(size_t i=0; i<3; i++)
bezierPoint(
outputPoints[ mi+i ],
outputPoints[ p0i+i ],
outputPoints[ p1i+i ]
);
};
// Iterate over input cells and generate new points
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t temp = i*3+1;
size_t aInputIndex = (size_t) inputTopology[ temp++ ];
size_t bInputIndex = (size_t) inputTopology[ temp ];
// already computed points
size_t a = aInputIndex*3;
size_t a0 = a+1;
size_t a1 = a+2;
size_t b = bInputIndex*3;
size_t b0 = b+1;
size_t b1 = b+2;
// points to compute
size_t offset = edgePointOffset + i*7;
size_t m0 = offset;
size_t m1 = offset+1;
size_t a0m0 = offset+2;
size_t m0b0 = offset+3;
size_t b1m1 = offset+4;
size_t m1a1 = offset+5;
size_t c = offset+6;
getMidPoint(m0, a0, b0, outputPoints);
getMidPoint(m1, a1, b1, outputPoints);
getMidPoint(c, m0, m1, outputPoints);
getMidPoint2(a0m0, a0, m0, sizeAxis, outputPoints);
getMidPoint2(m0b0, b0, m0, sizeAxis, outputPoints);
getMidPoint2(b1m1, b1, m1, sizeAxis, outputPoints);
getMidPoint2(m1a1, a1, m1, sizeAxis, outputPoints);
}
// Print Status
{
stringstream msg;
msg << "done ("<<(t.getElapsedTime()-t0)<<" s)."<<endl;
dMsg(cout, msg.str(), timeMsg);
}
}
// -------------------------------------------------------------------------
// Compute Output Cells
// -------------------------------------------------------------------------
{
dMsg(cout, "[ttkMeshGraph] Computing output cells ... ", timeMsg);
t0 = t.getElapsedTime();
topoType edgePointOffset_ = (topoType) edgePointOffset;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t temp = i*3+1;
topoType aInputIndex = inputTopology[ temp++ ];
topoType bInputIndex = inputTopology[ temp ];
// get point indicies
topoType a = aInputIndex*3;
topoType a0 = a+1;
topoType a1 = a+2;
topoType b = bInputIndex*3;
topoType b0 = b+1;
topoType b1 = b+2;
topoType i_ = (topoType) i;
topoType offset = edgePointOffset_ + i_*7;
topoType m0 = offset;
topoType m1 = offset+1;
topoType a0m0 = offset+2;
topoType m0b0 = offset+3;
topoType b1m1 = offset+4;
topoType m1a1 = offset+5;
topoType c = offset+6;
// output cell offset
size_t q = i*18;
// first quadratic quad
outputTopology[q++] = 8;
outputTopology[q++] = a0;
outputTopology[q++] = m0;
outputTopology[q++] = m1;
outputTopology[q++] = a1;
outputTopology[q++] = a0m0;
outputTopology[q++] = c;
outputTopology[q++] = m1a1;
outputTopology[q++] = a;
// second quadratic quad
outputTopology[q++] = 8;
outputTopology[q++] = m0;
outputTopology[q++] = b0;
outputTopology[q++] = b1;
outputTopology[q++] = m1;
outputTopology[q++] = m0b0;
outputTopology[q++] = b;
outputTopology[q++] = b1m1;
outputTopology[q++] = c;
}
// Print Status
{
stringstream msg;
msg << "done ("<<(t.getElapsedTime()-t0)<<" s)."<<endl;
dMsg(cout, msg.str(), timeMsg);
}
}
return 1;
}
// =============================================================================
// Version 2: Mesh Graph with Linear Polygon
// =============================================================================
template <typename topoType, typename sizeType> int ttk::MeshGraph::execute2(
// Input
const float* inputPoints,
const topoType* inputTopology,
const size_t nInputPoints,
const size_t nInputCells,
const size_t nSubdivisions,
const sizeType* inputPointSizes,
const float sizeScale,
const size_t sizeAxis,
// Output
float* outputPoints,
topoType* outputTopology
) const{
Timer t;
double t0=0;
// Print Input
{
stringstream msg;
msg << "[ttkMeshGraph] Computing linear polygon for graph with" << endl
<< "[ttkMeshGraph] - "<< nInputPoints << " points" << endl
<< "[ttkMeshGraph] - "<< nInputCells << " edges" << endl
<< "[ttkMeshGraph] - "<< nSubdivisions << " subdivisions" << endl;
dMsg(cout, msg.str(), infoMsg);
}
auto getInputPointData =
[](
const size_t& pointIndex,
const float* inputPoints,
const sizeType* inputPointSizes,
const float& sizeScale,
float data[4]
) {
size_t i= pointIndex*3;
data[0] = inputPoints[i++];
data[1] = inputPoints[i++];
data[2] = inputPoints[i];
data[3] = (inputPointSizes!=nullptr)?((float) inputPointSizes[pointIndex]) * sizeScale : sizeScale;
};
size_t subdivisionOffset = nInputPoints*2;
size_t nSubdivisionPoints = nSubdivisions*2;
size_t outputPointsSubdivisonOffset = nSubdivisionPoints*3;
// -------------------------------------------------------------------------
// Compute Output Point Locations
// -------------------------------------------------------------------------
// outputPoints: [
// corners: 2*inputPoints in inputPoints order;
// SubPoints: 2 per subdivison in cell order]
// ]
{
dMsg(cout, "[ttkMeshGraph] Computing output points ... ", timeMsg);
t0 = t.getElapsedTime();
// ---------------------------------------------------------------------
// Compute Corners
// ---------------------------------------------------------------------
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputPoints; i++){
float data[4];
getInputPointData(i, inputPoints, inputPointSizes, sizeScale, data);
size_t q = i*6;
outputPoints[q ] = data[0];
outputPoints[q+1] = data[1];
outputPoints[q+2] = data[2];
outputPoints[q+3] = data[0];
outputPoints[q+4] = data[1];
outputPoints[q+5] = data[2];
outputPoints[q+ sizeAxis] += data[3]/2;
outputPoints[q+3+sizeAxis] -= data[3]/2;
}
// ---------------------------------------------------------------------
// Compute SubPoints
// ---------------------------------------------------------------------
size_t q = subdivisionOffset*3;
float nSubdivisionsP1 = nSubdivisions+1;
auto computeBezierPoint = [](
float* outputPoints,
const size_t& no0,
const size_t& no1,
const size_t& sizeAxis,
const size_t& subdivisionOffset,
const float lambda
){
float lambdaI = 1 - lambda;
float lambda_2 = lambda*lambda;
float lambda_3 = lambda*lambda_2;
float lambdaI_2 = lambdaI*lambdaI;
float lambdaI_3 = lambdaI*lambdaI_2;
float m0[3];
float m1[3];
for(size_t i=0; i<3; i++){
m0[i] = (outputPoints[no0+i]+outputPoints[no1+i])/2;
m1[i] = m0[i];
}
m0[sizeAxis] = outputPoints[no0+sizeAxis];
m1[sizeAxis] = outputPoints[no1+sizeAxis];
for(size_t i=0; i<3; i++)
outputPoints[subdivisionOffset+i] = lambdaI_3*outputPoints[no0+i] + 3*lambdaI_2*lambda*m0[i] + 3*lambdaI*lambda_2*m1[i] + lambda_3*outputPoints[no1+i];
};
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t temp = i*3+1;
size_t n0 = (size_t) inputTopology[ temp++ ];
size_t n1 = (size_t) inputTopology[ temp ];
size_t no0 = n0*6;
size_t no1 = n1*6;
size_t q2 = q + i*outputPointsSubdivisonOffset;
for(float j=1; j<=nSubdivisions; j++){
computeBezierPoint(
outputPoints,
no0,
no1,
sizeAxis,
q2,
j/nSubdivisionsP1
);
computeBezierPoint(
outputPoints,
no0+3,
no1+3,
sizeAxis,
q2+3,
j/nSubdivisionsP1
);
q2 += 6;
}
}
// Print Status
{
stringstream msg;
msg << "done ("<<(t.getElapsedTime()-t0)<<" s)."<<endl;
dMsg(cout, msg.str(), timeMsg);
}
}
// -------------------------------------------------------------------------
// Compute Output Cells
// -------------------------------------------------------------------------
{
dMsg(cout, "[ttkMeshGraph] Computing output cells ... ", timeMsg);
t0 = t.getElapsedTime();
size_t cellSize = this->computeOutputCellSize( nSubdivisions );
topoType cellDim = ((topoType)cellSize)-1;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t q = i*3+1;
topoType in0 = inputTopology[q++]*2;
topoType in1 = inputTopology[q]*2;
topoType c0 = in0;
topoType c1 = in0+1;
topoType c2 = in1+1;
topoType c3 = in1;
size_t q2 = cellSize*i;
outputTopology[q2++] = cellDim;
outputTopology[q2++] = c0;
outputTopology[q2++] = c1;
size_t temp = subdivisionOffset + i*nSubdivisionPoints;
for(size_t j=0; j<nSubdivisions; j++)
outputTopology[q2++] = (topoType) (temp + j*2 + 1);
outputTopology[q2++] = c2;
outputTopology[q2++] = c3;
for(int j=nSubdivisions-1; j>=0; j--)
outputTopology[q2++] = (topoType) (temp + j*2);
}
// Print Status
{
stringstream msg;
msg << "done ("<<(t.getElapsedTime()-t0)<<" s)."<<endl;
dMsg(cout, msg.str(), timeMsg);
}
}
return 1;
}
// =============================================================================
// Map input point data to output point data
// =============================================================================
template <typename topoType, typename dataType> int ttk::MeshGraph::mapInputPointDataToOutputPointData(
const topoType* inputTopology,
const size_t& nInputPoints,
const size_t& nInputCells,
const dataType* inputPointData,
dataType* outputPointData,
const bool& useQuadraticCells,
const size_t& nSubdivisions
) const {
if(useQuadraticCells){
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputPoints; i++){
size_t q = i*3;
// a, a0, a1
outputPointData[q ] = inputPointData[i];
outputPointData[q+1] = inputPointData[i];
outputPointData[q+2] = inputPointData[i];
}
size_t edgePointOffset = nInputPoints*3;
// Iterate over input cells and assign point data of intermediate points
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t temp = i*3+1;
size_t aInputIndex = (size_t) inputTopology[ temp++ ];
size_t bInputIndex = (size_t) inputTopology[ temp ];
size_t a = aInputIndex*3;
size_t b = bInputIndex*3;
size_t offset = edgePointOffset + i*7;
size_t m0 = offset;
size_t m1 = offset+1;
size_t a0m0 = offset+2;
size_t m0b0 = offset+3;
size_t b1m1 = offset+4;
size_t m1a1 = offset+5;
size_t c = offset+6;
outputPointData[c] = (dataType) ((outputPointData[a]+outputPointData[b])/2);
outputPointData[m0] = outputPointData[c];
outputPointData[m1] = outputPointData[c];
outputPointData[a0m0] = (dataType) ((outputPointData[a]+outputPointData[c])/2);
outputPointData[m1a1] = outputPointData[a0m0];
outputPointData[m0b0] = (dataType) ((outputPointData[c]+outputPointData[b])/2);
outputPointData[b1m1] = outputPointData[m0b0];
}
} else {
// Corners
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputPoints; i++){
size_t offset = i*2;
auto& v = inputPointData[i];
outputPointData[offset ] = v;
outputPointData[offset+1] = v;
}
// Intermediate Points
size_t subdivisionOffset = nInputPoints*2;
size_t nSubdivisionPoints = nSubdivisions*2;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t q = i*3+1;
topoType c0 = inputTopology[q ];
dataType c0V = inputPointData[c0];
size_t temp = subdivisionOffset + i*nSubdivisionPoints;
for(size_t j=0; j<nSubdivisions; j++){
size_t q2 = temp + j*2;
outputPointData[ q2 ] = c0V;
outputPointData[ q2+1 ] = c0V;
}
}
}
return 1;
};
// =============================================================================
// Map input cell data to output cell data
// =============================================================================
template <typename topoType, typename dataType> int ttk::MeshGraph::mapInputCellDataToOutputCellData(
const size_t& nInputCells,
const dataType* inputCellData,
dataType* outputCellData,
const bool& useQuadraticCells,
const size_t& nSubdivisions
) const {
if(useQuadraticCells){
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
size_t offset = i*2;
outputCellData[offset ] = inputCellData[i];
outputCellData[offset+1] = inputCellData[i];
}
} else {
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(size_t i=0; i<nInputCells; i++){
outputCellData[i] = inputCellData[i];
}
}
return 1;
};
|
cyclic_reduction.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.17 $
***********************************************************************EHEADER*/
/******************************************************************************
* Cyclic reduction algorithm (coded as if it were a 1D MG method)
*
*****************************************************************************/
#include "_hypre_struct_ls.h"
#define DEBUG 0
/*--------------------------------------------------------------------------
* Macros
*--------------------------------------------------------------------------*/
#define hypre_CycRedSetCIndex(base_index, base_stride, level, cdir, cindex) \
{ \
if (level > 0) \
hypre_SetIndex(cindex, 0, 0, 0); \
else \
hypre_CopyIndex(base_index, cindex); \
hypre_IndexD(cindex, cdir) += 0; \
}
#define hypre_CycRedSetFIndex(base_index, base_stride, level, cdir, findex) \
{ \
if (level > 0) \
hypre_SetIndex(findex, 0, 0, 0); \
else \
hypre_CopyIndex(base_index, findex); \
hypre_IndexD(findex, cdir) += 1; \
}
#define hypre_CycRedSetStride(base_index, base_stride, level, cdir, stride) \
{ \
if (level > 0) \
hypre_SetIndex(stride, 1, 1, 1); \
else \
hypre_CopyIndex(base_stride, stride); \
hypre_IndexD(stride, cdir) *= 2; \
}
/*--------------------------------------------------------------------------
* hypre_CyclicReductionData data structure
*--------------------------------------------------------------------------*/
typedef struct
{
MPI_Comm comm;
HYPRE_Int num_levels;
HYPRE_Int cdir; /* coarsening direction */
hypre_Index base_index;
hypre_Index base_stride;
hypre_StructGrid **grid_l;
hypre_BoxArray *base_points;
hypre_BoxArray **fine_points_l;
double *data;
hypre_StructMatrix **A_l;
hypre_StructVector **x_l;
hypre_ComputePkg **down_compute_pkg_l;
hypre_ComputePkg **up_compute_pkg_l;
HYPRE_Int time_index;
HYPRE_Int solve_flops;
} hypre_CyclicReductionData;
/*--------------------------------------------------------------------------
* hypre_CyclicReductionCreate
*--------------------------------------------------------------------------*/
void *
hypre_CyclicReductionCreate( MPI_Comm comm )
{
hypre_CyclicReductionData *cyc_red_data;
cyc_red_data = hypre_CTAlloc(hypre_CyclicReductionData, 1);
(cyc_red_data -> comm) = comm;
(cyc_red_data -> cdir) = 0;
(cyc_red_data -> time_index) = hypre_InitializeTiming("CyclicReduction");
/* set defaults */
hypre_SetIndex((cyc_red_data -> base_index), 0, 0, 0);
hypre_SetIndex((cyc_red_data -> base_stride), 1, 1, 1);
return (void *) cyc_red_data;
}
/*--------------------------------------------------------------------------
* hypre_CycRedCreateCoarseOp
*
* NOTE: This routine assumes that domain boundary ghost zones (i.e., ghost
* zones that do not intersect the grid) have the identity equation in them.
* This is currently insured by the MatrixAssemble routine.
*--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_CycRedCreateCoarseOp( hypre_StructMatrix *A,
hypre_StructGrid *coarse_grid,
HYPRE_Int cdir )
{
hypre_StructMatrix *Ac;
hypre_Index *Ac_stencil_shape;
hypre_StructStencil *Ac_stencil;
HYPRE_Int Ac_stencil_size;
HYPRE_Int Ac_stencil_dim;
HYPRE_Int Ac_num_ghost[] = {0, 0, 0, 0, 0, 0};
HYPRE_Int i;
HYPRE_Int stencil_rank;
Ac_stencil_dim = 1;
/*-----------------------------------------------
* Define Ac_stencil
*-----------------------------------------------*/
stencil_rank = 0;
/*-----------------------------------------------
* non-symmetric case:
*
* 3 point fine grid stencil produces 3 point Ac
*-----------------------------------------------*/
if (!hypre_StructMatrixSymmetric(A))
{
Ac_stencil_size = 3;
Ac_stencil_shape = hypre_CTAlloc(hypre_Index, Ac_stencil_size);
for (i = -1; i < 2; i++)
{
/* Storage for 3 elements (c,w,e) */
hypre_SetIndex(Ac_stencil_shape[stencil_rank],i,0,0);
stencil_rank++;
}
}
/*-----------------------------------------------
* symmetric case:
*
* 3 point fine grid stencil produces 3 point Ac
*
* Only store the lower triangular part + diagonal = 2 entries,
* lower triangular means the lower triangular part on the matrix
* in the standard lexicalgraphic ordering.
*-----------------------------------------------*/
else
{
Ac_stencil_size = 2;
Ac_stencil_shape = hypre_CTAlloc(hypre_Index, Ac_stencil_size);
for (i = -1; i < 1; i++)
{
/* Storage for 2 elements in (c,w) */
hypre_SetIndex(Ac_stencil_shape[stencil_rank],i,0,0);
stencil_rank++;
}
}
Ac_stencil = hypre_StructStencilCreate(Ac_stencil_dim, Ac_stencil_size,
Ac_stencil_shape);
Ac = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
coarse_grid, Ac_stencil);
hypre_StructStencilDestroy(Ac_stencil);
/*-----------------------------------------------
* Coarse operator in symmetric iff fine operator is
*-----------------------------------------------*/
hypre_StructMatrixSymmetric(Ac) = hypre_StructMatrixSymmetric(A);
/*-----------------------------------------------
* Set number of ghost points
*-----------------------------------------------*/
Ac_num_ghost[2*cdir] = 1;
if (!hypre_StructMatrixSymmetric(A))
{
Ac_num_ghost[2*cdir + 1] = 1;
}
hypre_StructMatrixSetNumGhost(Ac, Ac_num_ghost);
hypre_StructMatrixInitializeShell(Ac);
return Ac;
}
/*--------------------------------------------------------------------------
* hypre_CycRedSetupCoarseOp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CycRedSetupCoarseOp( hypre_StructMatrix *A,
hypre_StructMatrix *Ac,
hypre_Index cindex,
hypre_Index cstride )
{
hypre_Index index;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int fi, ci;
hypre_Box *A_dbox;
hypre_Box *Ac_dbox;
double *a_cc, *a_cw, *a_ce;
double *ac_cc, *ac_cw, *ac_ce;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int xOffsetA;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(Ac);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
Ac_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(Ac), ci);
/*-----------------------------------------------
* Extract pointers for 3-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient
* a_ce is pointer for east coefficient
*-----------------------------------------------*/
hypre_SetIndex(index,0,0,0);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index,-1,0,0);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index,1,0,0);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------
* Extract pointers for coarse grid operator - always 3-point:
*
* If A is symmetric so is Ac. We build only the
* lower triangular part (plus diagonal).
*
* ac_cc is pointer for center coefficient (etc.)
*-----------------------------------------------*/
hypre_SetIndex(index,0,0,0);
ac_cc = hypre_StructMatrixExtractPointerByIndex(Ac, ci, index);
hypre_SetIndex(index,-1,0,0);
ac_cw = hypre_StructMatrixExtractPointerByIndex(Ac, ci, index);
if(!hypre_StructMatrixSymmetric(A))
{
hypre_SetIndex(index,1,0,0);
ac_ce = hypre_StructMatrixExtractPointerByIndex(Ac, ci, index);
}
/*-----------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer
* to data associated with the point which we are
* building the stencil for. The below offsets
* are used in refering to data associated with
* other points.
*-----------------------------------------------*/
hypre_SetIndex(index,1,0,0);
xOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
/*-----------------------------------------------
* non-symmetric case
*-----------------------------------------------*/
if(!hypre_StructMatrixSymmetric(A))
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
A_dbox, fstart, stridef, iA,
Ac_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc,iAm1,iAp1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(iA, iAc)
{
iAm1 = iA - xOffsetA;
iAp1 = iA + xOffsetA;
ac_cw[iAc] = - a_cw[iA] *a_cw[iAm1] / a_cc[iAm1];
ac_cc[iAc] = a_cc[iA]
- a_cw[iA] * a_ce[iAm1] / a_cc[iAm1]
- a_ce[iA] * a_cw[iAp1] / a_cc[iAp1];
ac_ce[iAc] = - a_ce[iA] *a_ce[iAp1] / a_cc[iAp1];
}
hypre_BoxLoop2End(iA, iAc);
}
/*-----------------------------------------------
* symmetric case
*-----------------------------------------------*/
else
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size,
A_dbox, fstart, stridef, iA,
Ac_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc,iAm1,iAp1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(iA, iAc)
{
iAm1 = iA - xOffsetA;
iAp1 = iA + xOffsetA;
ac_cw[iAc] = - a_cw[iA] *a_cw[iAm1] / a_cc[iAm1];
ac_cc[iAc] = a_cc[iA]
- a_cw[iA] * a_ce[iAm1] / a_cc[iAm1]
- a_ce[iA] * a_cw[iAp1] / a_cc[iAp1];
}
hypre_BoxLoop2End(iA, iAc);
}
} /* end ForBoxI */
hypre_StructMatrixAssemble(Ac);
/*-----------------------------------------------------------------------
* Collapse stencil in periodic direction on coarsest grid.
*-----------------------------------------------------------------------*/
if (hypre_IndexX(hypre_StructGridPeriodic(cgrid)) == 1)
{
hypre_ForBoxI(ci, cgrid_boxes)
{
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
Ac_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(Ac), ci);
/*-----------------------------------------------
* Extract pointers for coarse grid operator - always 3-point:
*
* If A is symmetric so is Ac. We build only the
* lower triangular part (plus diagonal).
*
* ac_cc is pointer for center coefficient (etc.)
*-----------------------------------------------*/
hypre_SetIndex(index,0,0,0);
ac_cc = hypre_StructMatrixExtractPointerByIndex(Ac, ci, index);
hypre_SetIndex(index,-1,0,0);
ac_cw = hypre_StructMatrixExtractPointerByIndex(Ac, ci, index);
if(!hypre_StructMatrixSymmetric(A))
{
hypre_SetIndex(index,1,0,0);
ac_ce = hypre_StructMatrixExtractPointerByIndex(Ac, ci, index);
}
/*-----------------------------------------------
* non-symmetric case
*-----------------------------------------------*/
if(!hypre_StructMatrixSymmetric(A))
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixDim(A), loop_size,
Ac_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
ac_cc[iAc] += (ac_cw[iAc] + ac_ce[iAc]);
ac_cw[iAc] = 0.0;
ac_ce[iAc] = 0.0;
}
hypre_BoxLoop1End(iAc);
}
/*-----------------------------------------------
* symmetric case
*-----------------------------------------------*/
else
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop1Begin(hypre_StructMatrixDim(A), loop_size,
Ac_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(iAc)
{
ac_cc[iAc] += (2.0 * ac_cw[iAc]);
ac_cw[iAc] = 0.0;
}
hypre_BoxLoop1End(iAc);
}
} /* end ForBoxI */
}
hypre_StructMatrixAssemble(Ac);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CyclicReductionSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CyclicReductionSetup( void *cyc_red_vdata,
hypre_StructMatrix *A,
hypre_StructVector *b,
hypre_StructVector *x )
{
hypre_CyclicReductionData *cyc_red_data = cyc_red_vdata;
MPI_Comm comm = (cyc_red_data -> comm);
HYPRE_Int cdir = (cyc_red_data -> cdir);
hypre_IndexRef base_index = (cyc_red_data -> base_index);
hypre_IndexRef base_stride = (cyc_red_data -> base_stride);
HYPRE_Int num_levels;
hypre_StructGrid **grid_l;
hypre_BoxArray *base_points;
hypre_BoxArray **fine_points_l;
double *data;
HYPRE_Int data_size = 0;
hypre_StructMatrix **A_l;
hypre_StructVector **x_l;
hypre_ComputePkg **down_compute_pkg_l;
hypre_ComputePkg **up_compute_pkg_l;
hypre_ComputeInfo *compute_info;
hypre_Index cindex;
hypre_Index findex;
hypre_Index stride;
hypre_StructGrid *grid;
hypre_Box *cbox;
HYPRE_Int l;
HYPRE_Int flop_divisor;
HYPRE_Int x_num_ghost[] = {0, 0, 0, 0, 0, 0};
/*-----------------------------------------------------
* Set up coarse grids
*-----------------------------------------------------*/
grid = hypre_StructMatrixGrid(A);
/* Compute a preliminary num_levels value based on the grid */
cbox = hypre_BoxDuplicate(hypre_StructGridBoundingBox(grid));
num_levels = hypre_Log2(hypre_BoxSizeD(cbox, cdir)) + 2;
grid_l = hypre_TAlloc(hypre_StructGrid *, num_levels);
hypre_StructGridRef(grid, &grid_l[0]);
for (l = 0; ; l++)
{
/* set cindex and stride */
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
/* check to see if we should coarsen */
if ( hypre_BoxIMinD(cbox, cdir) == hypre_BoxIMaxD(cbox, cdir) )
{
/* stop coarsening */
break;
}
/* coarsen cbox */
hypre_ProjectBox(cbox, cindex, stride);
hypre_StructMapFineToCoarse(hypre_BoxIMin(cbox), cindex, stride,
hypre_BoxIMin(cbox));
hypre_StructMapFineToCoarse(hypre_BoxIMax(cbox), cindex, stride,
hypre_BoxIMax(cbox));
/* coarsen the grid */
hypre_StructCoarsen(grid_l[l], cindex, stride, 1, &grid_l[l+1]);
}
num_levels = l + 1;
/* free up some things */
hypre_BoxDestroy(cbox);
(cyc_red_data -> num_levels) = num_levels;
(cyc_red_data -> grid_l) = grid_l;
/*-----------------------------------------------------
* Set up base points
*-----------------------------------------------------*/
base_points = hypre_BoxArrayDuplicate(hypre_StructGridBoxes(grid_l[0]));
hypre_ProjectBoxArray(base_points, base_index, base_stride);
(cyc_red_data -> base_points) = base_points;
/*-----------------------------------------------------
* Set up fine points
*-----------------------------------------------------*/
fine_points_l = hypre_TAlloc(hypre_BoxArray *, num_levels);
for (l = 0; l < (num_levels - 1); l++)
{
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetFIndex(base_index, base_stride, l, cdir, findex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
fine_points_l[l] =
hypre_BoxArrayDuplicate(hypre_StructGridBoxes(grid_l[l]));
hypre_ProjectBoxArray(fine_points_l[l], findex, stride);
}
fine_points_l[l] =
hypre_BoxArrayDuplicate(hypre_StructGridBoxes(grid_l[l]));
if (num_levels == 1)
{
hypre_ProjectBoxArray(fine_points_l[l], base_index, base_stride);
}
(cyc_red_data -> fine_points_l) = fine_points_l;
/*-----------------------------------------------------
* Set up matrix and vector structures
*-----------------------------------------------------*/
A_l = hypre_TAlloc(hypre_StructMatrix *, num_levels);
x_l = hypre_TAlloc(hypre_StructVector *, num_levels);
A_l[0] = hypre_StructMatrixRef(A);
x_l[0] = hypre_StructVectorRef(x);
x_num_ghost[2*cdir] = 1;
x_num_ghost[2*cdir + 1] = 1;
for (l = 0; l < (num_levels - 1); l++)
{
A_l[l+1] = hypre_CycRedCreateCoarseOp(A_l[l], grid_l[l+1], cdir);
data_size += hypre_StructMatrixDataSize(A_l[l+1]);
x_l[l+1] = hypre_StructVectorCreate(comm, grid_l[l+1]);
hypre_StructVectorSetNumGhost(x_l[l+1], x_num_ghost);
hypre_StructVectorInitializeShell(x_l[l+1]);
data_size += hypre_StructVectorDataSize(x_l[l+1]);
}
data = hypre_SharedCTAlloc(double, data_size);
(cyc_red_data -> data) = data;
for (l = 0; l < (num_levels - 1); l++)
{
hypre_StructMatrixInitializeData(A_l[l+1], data);
data += hypre_StructMatrixDataSize(A_l[l+1]);
hypre_StructVectorInitializeData(x_l[l+1], data);
hypre_StructVectorAssemble(x_l[l+1]);
data += hypre_StructVectorDataSize(x_l[l+1]);
}
(cyc_red_data -> A_l) = A_l;
(cyc_red_data -> x_l) = x_l;
/*-----------------------------------------------------
* Set up coarse grid operators
*-----------------------------------------------------*/
for (l = 0; l < (num_levels - 1); l++)
{
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
hypre_CycRedSetupCoarseOp(A_l[l], A_l[l+1], cindex, stride);
}
/*----------------------------------------------------------
* Set up compute packages
*----------------------------------------------------------*/
down_compute_pkg_l = hypre_TAlloc(hypre_ComputePkg *, (num_levels - 1));
up_compute_pkg_l = hypre_TAlloc(hypre_ComputePkg *, (num_levels - 1));
for (l = 0; l < (num_levels - 1); l++)
{
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetFIndex(base_index, base_stride, l, cdir, findex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
/* down-cycle */
hypre_CreateComputeInfo(grid_l[l], hypre_StructMatrixStencil(A_l[l]),
&compute_info);
hypre_ComputeInfoProjectSend(compute_info, findex, stride);
hypre_ComputeInfoProjectRecv(compute_info, findex, stride);
hypre_ComputeInfoProjectComp(compute_info, cindex, stride);
hypre_ComputePkgCreate(compute_info,
hypre_StructVectorDataSpace(x_l[l]), 1,
grid_l[l], &down_compute_pkg_l[l]);
/* up-cycle */
hypre_CreateComputeInfo(grid_l[l], hypre_StructMatrixStencil(A_l[l]),
&compute_info);
hypre_ComputeInfoProjectSend(compute_info, cindex, stride);
hypre_ComputeInfoProjectRecv(compute_info, cindex, stride);
hypre_ComputeInfoProjectComp(compute_info, findex, stride);
hypre_ComputePkgCreate(compute_info,
hypre_StructVectorDataSpace(x_l[l]), 1,
grid_l[l], &up_compute_pkg_l[l]);
}
(cyc_red_data -> down_compute_pkg_l) = down_compute_pkg_l;
(cyc_red_data -> up_compute_pkg_l) = up_compute_pkg_l;
/*-----------------------------------------------------
* Compute solve flops
*-----------------------------------------------------*/
flop_divisor = (hypre_IndexX(base_stride) *
hypre_IndexY(base_stride) *
hypre_IndexZ(base_stride) );
(cyc_red_data -> solve_flops) =
hypre_StructVectorGlobalSize(x_l[0])/2/flop_divisor;
(cyc_red_data -> solve_flops) +=
5*hypre_StructVectorGlobalSize(x_l[0])/2/flop_divisor;
for (l = 1; l < (num_levels - 1); l++)
{
(cyc_red_data -> solve_flops) +=
10*hypre_StructVectorGlobalSize(x_l[l])/2;
}
if (num_levels > 1)
{
(cyc_red_data -> solve_flops) +=
hypre_StructVectorGlobalSize(x_l[l])/2;
}
/*-----------------------------------------------------
* Finalize some things
*-----------------------------------------------------*/
#if DEBUG
{
char filename[255];
/* debugging stuff */
for (l = 0; l < num_levels; l++)
{
hypre_sprintf(filename, "yout_A.%02d", l);
hypre_StructMatrixPrint(filename, A_l[l], 0);
}
}
#endif
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CyclicReduction
*
* The solution vectors on each level are also used to store the
* right-hand-side data. We can do this because of the red-black
* nature of the algorithm and the fact that the method is exact,
* allowing one to assume initial guesses of zero on all grid levels.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CyclicReduction( void *cyc_red_vdata,
hypre_StructMatrix *A,
hypre_StructVector *b,
hypre_StructVector *x )
{
hypre_CyclicReductionData *cyc_red_data = cyc_red_vdata;
HYPRE_Int num_levels = (cyc_red_data -> num_levels);
HYPRE_Int cdir = (cyc_red_data -> cdir);
hypre_IndexRef base_index = (cyc_red_data -> base_index);
hypre_IndexRef base_stride = (cyc_red_data -> base_stride);
hypre_BoxArray *base_points = (cyc_red_data -> base_points);
hypre_BoxArray **fine_points_l = (cyc_red_data -> fine_points_l);
hypre_StructMatrix **A_l = (cyc_red_data -> A_l);
hypre_StructVector **x_l = (cyc_red_data -> x_l);
hypre_ComputePkg **down_compute_pkg_l =
(cyc_red_data -> down_compute_pkg_l);
hypre_ComputePkg **up_compute_pkg_l =
(cyc_red_data -> up_compute_pkg_l);
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_CommHandle *comm_handle;
hypre_BoxArrayArray *compute_box_aa;
hypre_BoxArray *compute_box_a;
hypre_Box *compute_box;
hypre_Box *A_dbox;
hypre_Box *x_dbox;
hypre_Box *b_dbox;
hypre_Box *xc_dbox;
double *Ap, *Awp, *Aep;
double *xp, *xwp, *xep;
double *bp;
double *xcp;
HYPRE_Int Ai;
HYPRE_Int xi;
HYPRE_Int bi;
HYPRE_Int xci;
hypre_Index cindex;
hypre_Index stride;
hypre_Index index;
hypre_Index loop_size;
hypre_Index start;
hypre_Index startc;
hypre_Index stridec;
HYPRE_Int compute_i, fi, ci, j, l;
hypre_BeginTiming(cyc_red_data -> time_index);
/*--------------------------------------------------
* Initialize some things
*--------------------------------------------------*/
hypre_SetIndex(stridec, 1, 1, 1);
hypre_StructMatrixDestroy(A_l[0]);
hypre_StructVectorDestroy(x_l[0]);
A_l[0] = hypre_StructMatrixRef(A);
x_l[0] = hypre_StructVectorRef(x);
/*--------------------------------------------------
* Copy b into x
*--------------------------------------------------*/
compute_box_a = base_points;
hypre_ForBoxI(fi, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, fi);
x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), fi);
b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), fi);
xp = hypre_StructVectorBoxData(x, fi);
bp = hypre_StructVectorBoxData(b, fi);
hypre_CopyIndex(hypre_BoxIMin(compute_box), start);
hypre_BoxGetStrideSize(compute_box, base_stride, loop_size);
hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
x_dbox, start, base_stride, xi,
b_dbox, start, base_stride, bi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,bi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, bi)
{
xp[xi] = bp[bi];
}
hypre_BoxLoop2End(xi, bi);
}
/*--------------------------------------------------
* Down cycle:
*
* 1) Do an F-relaxation sweep with zero initial guess
* 2) Compute and inject residual at C-points
* - computations are at C-points
* - communications are at F-points
*
* Notes:
* - Before these two steps are executed, the
* fine-grid solution vector contains the right-hand-side.
* - After these two steps are executed, the fine-grid
* solution vector contains the right-hand side at
* C-points and the current solution approximation at
* F-points. The coarse-grid solution vector contains
* the restricted (injected) fine-grid residual.
*--------------------------------------------------*/
for (l = 0; l < num_levels - 1 ; l++)
{
/* set cindex and stride */
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
/* Step 1 */
compute_box_a = fine_points_l[l];
hypre_ForBoxI(fi, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, fi);
A_dbox =
hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_l[l]), fi);
x_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l]), fi);
hypre_SetIndex(index, 0, 0, 0);
Ap = hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xp = hypre_StructVectorBoxData(x_l[l], fi);
hypre_CopyIndex(hypre_BoxIMin(compute_box), start);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
A_dbox, start, stride, Ai,
x_dbox, start, stride, xi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(Ai, xi)
{
xp[xi] /= Ap[Ai];
}
hypre_BoxLoop2End(Ai, xi);
}
/* Step 2 */
fgrid = hypre_StructVectorGrid(x_l[l]);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructVectorGrid(x_l[l+1]);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
xp = hypre_StructVectorData(x_l[l]);
hypre_InitializeIndtComputations(down_compute_pkg_l[l], xp,
&comm_handle);
compute_box_aa =
hypre_ComputePkgIndtBoxes(down_compute_pkg_l[l]);
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa =
hypre_ComputePkgDeptBoxes(down_compute_pkg_l[l]);
}
break;
}
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
compute_box_a =
hypre_BoxArrayArrayBoxArray(compute_box_aa, fi);
A_dbox =
hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_l[l]), fi);
x_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l]), fi);
xc_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l+1]), ci);
xp = hypre_StructVectorBoxData(x_l[l], fi);
xcp = hypre_StructVectorBoxData(x_l[l+1], ci);
hypre_SetIndex(index, -1, 0, 0);
Awp =
hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xwp = hypre_StructVectorBoxData(x_l[l], fi) +
hypre_BoxOffsetDistance(x_dbox, index);
hypre_SetIndex(index, 1, 0, 0);
Aep =
hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xep = hypre_StructVectorBoxData(x_l[l], fi) +
hypre_BoxOffsetDistance(x_dbox, index);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
hypre_CopyIndex(hypre_BoxIMin(compute_box), start);
hypre_StructMapFineToCoarse(start, cindex, stride,
startc);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
hypre_BoxLoop3Begin(hypre_StructVectorDim(x), loop_size,
A_dbox, start, stride, Ai,
x_dbox, start, stride, xi,
xc_dbox, startc, stridec, xci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,xci) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(Ai, xi, xci)
{
xcp[xci] = xp[xi] -
Awp[Ai]*xwp[xi] -
Aep[Ai]*xep[xi];
}
hypre_BoxLoop3End(Ai, xi, xci);
}
}
}
}
/*--------------------------------------------------
* Coarsest grid:
*
* Do an F-relaxation sweep with zero initial guess
*
* This is the same as step 1 in above, but is
* broken out as a sepecial case to add a check
* for zero diagonal that can occur for singlar
* problems like the full Neumann problem.
*--------------------------------------------------*/
/* set cindex and stride */
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
compute_box_a = fine_points_l[l];
hypre_ForBoxI(fi, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, fi);
A_dbox =
hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_l[l]), fi);
x_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l]), fi);
hypre_SetIndex(index, 0, 0, 0);
Ap = hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xp = hypre_StructVectorBoxData(x_l[l], fi);
hypre_CopyIndex(hypre_BoxIMin(compute_box), start);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
A_dbox, start, stride, Ai,
x_dbox, start, stride, xi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(Ai, xi)
{
if (Ap[Ai] != 0.0)
{
xp[xi] /= Ap[Ai];
}
}
hypre_BoxLoop2End(Ai, xi);
}
/*--------------------------------------------------
* Up cycle:
*
* 1) Inject coarse error into fine-grid solution
* vector (this is the solution at the C-points)
* 2) Do an F-relaxation sweep on Ax = 0 and update
* solution at F-points
* - computations are at F-points
* - communications are at C-points
*--------------------------------------------------*/
for (l = (num_levels - 2); l >= 0; l--)
{
/* set cindex and stride */
hypre_CycRedSetCIndex(base_index, base_stride, l, cdir, cindex);
hypre_CycRedSetStride(base_index, base_stride, l, cdir, stride);
/* Step 1 */
fgrid = hypre_StructVectorGrid(x_l[l]);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructVectorGrid(x_l[l+1]);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
compute_box = hypre_BoxArrayBox(cgrid_boxes, ci);
hypre_CopyIndex(hypre_BoxIMin(compute_box), startc);
hypre_StructMapCoarseToFine(startc, cindex, stride, start);
x_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l]), fi);
xc_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l+1]), ci);
xp = hypre_StructVectorBoxData(x_l[l], fi);
xcp = hypre_StructVectorBoxData(x_l[l+1], ci);
hypre_BoxGetSize(compute_box, loop_size);
hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
x_dbox, start, stride, xi,
xc_dbox, startc, stridec, xci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,xci) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xi, xci)
{
xp[xi] = xcp[xci];
}
hypre_BoxLoop2End(xi, xci);
}
/* Step 2 */
for (compute_i = 0; compute_i < 2; compute_i++)
{
switch(compute_i)
{
case 0:
{
xp = hypre_StructVectorData(x_l[l]);
hypre_InitializeIndtComputations(up_compute_pkg_l[l], xp,
&comm_handle);
compute_box_aa =
hypre_ComputePkgIndtBoxes(up_compute_pkg_l[l]);
}
break;
case 1:
{
hypre_FinalizeIndtComputations(comm_handle);
compute_box_aa =
hypre_ComputePkgDeptBoxes(up_compute_pkg_l[l]);
}
break;
}
hypre_ForBoxArrayI(fi, compute_box_aa)
{
compute_box_a =
hypre_BoxArrayArrayBoxArray(compute_box_aa, fi);
A_dbox =
hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_l[l]), fi);
x_dbox =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(x_l[l]), fi);
hypre_SetIndex(index, 0, 0, 0);
Ap = hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xp = hypre_StructVectorBoxData(x_l[l], fi);
hypre_SetIndex(index, -1, 0, 0);
Awp =
hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xwp = hypre_StructVectorBoxData(x_l[l], fi) +
hypre_BoxOffsetDistance(x_dbox, index);
hypre_SetIndex(index, 1, 0, 0);
Aep =
hypre_StructMatrixExtractPointerByIndex(A_l[l], fi, index);
xep = hypre_StructVectorBoxData(x_l[l], fi) +
hypre_BoxOffsetDistance(x_dbox, index);
hypre_ForBoxI(j, compute_box_a)
{
compute_box = hypre_BoxArrayBox(compute_box_a, j);
hypre_CopyIndex(hypre_BoxIMin(compute_box), start);
hypre_BoxGetStrideSize(compute_box, stride, loop_size);
hypre_BoxLoop2Begin(hypre_StructVectorDim(x), loop_size,
A_dbox, start, stride, Ai,
x_dbox, start, stride, xi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(Ai, xi)
{
xp[xi] -= (Awp[Ai]*xwp[xi] +
Aep[Ai]*xep[xi] ) / Ap[Ai];
}
hypre_BoxLoop2End(Ai, xi);
}
}
}
}
/*-----------------------------------------------------
* Finalize some things
*-----------------------------------------------------*/
hypre_IncFLOPCount(cyc_red_data -> solve_flops);
hypre_EndTiming(cyc_red_data -> time_index);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CyclicReductionSetBase
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CyclicReductionSetBase( void *cyc_red_vdata,
hypre_Index base_index,
hypre_Index base_stride )
{
hypre_CyclicReductionData *cyc_red_data = cyc_red_vdata;
HYPRE_Int d;
for (d = 0; d < 3; d++)
{
hypre_IndexD((cyc_red_data -> base_index), d) =
hypre_IndexD(base_index, d);
hypre_IndexD((cyc_red_data -> base_stride), d) =
hypre_IndexD(base_stride, d);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CyclicReductionDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CyclicReductionDestroy( void *cyc_red_vdata )
{
hypre_CyclicReductionData *cyc_red_data = cyc_red_vdata;
HYPRE_Int l;
if (cyc_red_data)
{
hypre_BoxArrayDestroy(cyc_red_data -> base_points);
hypre_StructGridDestroy(cyc_red_data -> grid_l[0]);
hypre_StructMatrixDestroy(cyc_red_data -> A_l[0]);
hypre_StructVectorDestroy(cyc_red_data -> x_l[0]);
for (l = 0; l < ((cyc_red_data -> num_levels) - 1); l++)
{
hypre_StructGridDestroy(cyc_red_data -> grid_l[l+1]);
hypre_BoxArrayDestroy(cyc_red_data -> fine_points_l[l]);
hypre_StructMatrixDestroy(cyc_red_data -> A_l[l+1]);
hypre_StructVectorDestroy(cyc_red_data -> x_l[l+1]);
hypre_ComputePkgDestroy(cyc_red_data -> down_compute_pkg_l[l]);
hypre_ComputePkgDestroy(cyc_red_data -> up_compute_pkg_l[l]);
}
hypre_BoxArrayDestroy(cyc_red_data -> fine_points_l[l]);
hypre_SharedTFree(cyc_red_data -> data);
hypre_TFree(cyc_red_data -> grid_l);
hypre_TFree(cyc_red_data -> fine_points_l);
hypre_TFree(cyc_red_data -> A_l);
hypre_TFree(cyc_red_data -> x_l);
hypre_TFree(cyc_red_data -> down_compute_pkg_l);
hypre_TFree(cyc_red_data -> up_compute_pkg_l);
hypre_FinalizeTiming(cyc_red_data -> time_index);
hypre_TFree(cyc_red_data);
}
return hypre_error_flag;
}
|
GB_binop__gt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__gt_uint8
// A.*B function (eWiseMult): GB_AemultB__gt_uint8
// A*D function (colscale): GB_AxD__gt_uint8
// D*A function (rowscale): GB_DxB__gt_uint8
// C+=B function (dense accum): GB_Cdense_accumB__gt_uint8
// C+=b function (dense accum): GB_Cdense_accumb__gt_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_uint8
// C=scalar+B GB_bind1st__gt_uint8
// C=scalar+B' GB_bind1st_tran__gt_uint8
// C=A+scalar GB_bind2nd__gt_uint8
// C=A'+scalar GB_bind2nd_tran__gt_uint8
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__gt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__gt_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__gt_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__gt_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__gt_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__gt_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__gt_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__gt_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__gt_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB_bind1st_tran__gt_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB_bind2nd_tran__gt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mem_inl.h | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
namespace nvbio {
// find all MEMs covering a given base
//
// \return the right-most position covered by a MEM
//
template <typename pattern_type, typename fm_index_type, typename delegate_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 find_kmems(
const uint32 pattern_len,
const pattern_type pattern,
const uint32 x,
const fm_index_type f_index,
const fm_index_type r_index,
delegate_type& handler,
const uint32 min_intv,
const uint32 min_span)
{
typedef typename fm_index_type::index_type coord_type;
typedef typename fm_index_type::range_type range_type;
uint4 right_mems[1024];
nvbio::vector_view<uint4*> right_ranges( 0, &right_mems[0] );
// find how far can we extend right starting from x
//
{
// extend forward, using the reverse index
range_type f_range = make_vector( coord_type(0u), f_index.length() );
range_type r_range = make_vector( coord_type(0u), r_index.length() );
range_type prev_range = f_range;
uint32 i;
for (i = x; i < pattern_len; ++i)
{
const uint8 c = pattern[i];
if (c > 3) // there is an N here. no match
{
prev_range = f_range;
break;
}
// search c in the FM-index
extend_forward( f_index, r_index, f_range, r_range, c );
// check if the range is too small
if (1u + f_range.y - f_range.x < min_intv)
break;
// store the range
if (f_range.y - f_range.x != prev_range.y - prev_range.x)
{
// do not add the empty span
if (i > x)
right_ranges.push_back( make_vector( prev_range.x, prev_range.y, coord_type(x), coord_type(i) ) );
prev_range = f_range;
}
}
// check if we still need to save one range
if (right_ranges.size() && (right_ranges.back().y - right_ranges.back().x) != (prev_range.y - prev_range.x))
right_ranges.push_back( make_vector( prev_range.x, prev_range.y, coord_type(x), coord_type(i) ) );
}
// no valid match covering x
if (right_ranges.size() == 0u)
return x;
// save the result value for later
const uint32 rightmost_base = right_ranges.back().w;
// now extend backwards, using the forward index
//
// we basically loop through all MEMs ending in [x,x+n_ranges) starting
// from the end of the range and walking backwards - and for each of them we:
//
// - find their starting point through backwards search
//
// - and add them to the output only if they extend further left than
// any of the previously found ones
//
// keep track of the left-most coordinate covered by a MEM
uint32 leftmost_coordinate = x+1;
for (int32 j = right_ranges.size()-1; j >= 0; --j)
{
range_type f_range = make_vector( right_ranges[j].x, right_ranges[j].y );
const uint32 r = right_ranges[j].w;
// extend from r to the left as much possible
const fm_index_type& index = f_index;
int32 l;
for (l = int32(x) - 1; l >= 0; --l)
{
const uint8 c = pattern[l];
if (c > 3) // there is an N here. no match
break;
// search c in the FM-index
const range_type c_rank = rank(
index,
make_vector( f_range.x-1, f_range.y ),
c );
const range_type new_range = make_vector(
index.L2(c) + c_rank.x + 1,
index.L2(c) + c_rank.y );
// stop if the range became too small
if (1u + new_range.y - new_range.x < min_intv)
break;
// update the range
f_range = new_range;
}
// only output the range if it's not contained in any other MEM
if (uint32(l+1) < leftmost_coordinate && uint32(l+1) < r)
{
// save the range, together with its span
const uint2 pattern_span = make_uint2( uint32(l+1), r );
// keep the MEM only if it is above a certain length
if (pattern_span.y - pattern_span.x >= min_span)
{
// pass all results to the delegate
handler.output( f_range, pattern_span );
// update the left-most covered coordinate
leftmost_coordinate = uint32(l+1);
}
}
}
// return the right-most end of the MEMs covering x
return rightmost_base;
}
// find all k-MEMs covering a given base, for each k
//
// \return the right-most position covered by a MEM
//
template <typename pattern_type, typename fm_index_type, typename delegate_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 find_threshold_kmems(
const uint32 pattern_len,
const pattern_type pattern,
const uint32 x,
const fm_index_type f_index,
const fm_index_type r_index,
delegate_type& handler,
const uint32 min_intv,
const uint32 min_span)
{
typedef typename fm_index_type::index_type coord_type;
typedef typename fm_index_type::range_type range_type;
uint4 mems1[1024];
uint4 mems2[1024];
nvbio::vector_view<uint4*> prev( 0, &mems1[0] );
nvbio::vector_view<uint4*> curr( 0, &mems2[0] );
// find how far can we extend right starting from x
//
{
// extend forward, using the reverse index
range_type f_range = make_vector( coord_type(0u), f_index.length() );
range_type r_range = make_vector( coord_type(0u), r_index.length() );
range_type prev_range = f_range;
uint32 i;
for (i = x; i < pattern_len; ++i)
{
const uint8 c = pattern[i];
if (c > 3) // there is an N here. no match
{
prev_range = f_range;
break;
}
// search c in the FM-index
extend_forward( f_index, r_index, f_range, r_range, c );
// check if the range is too small
if (1u + f_range.y - f_range.x < min_intv)
break;
// store the range
if (f_range.y - f_range.x != prev_range.y - prev_range.x)
{
// do not add the empty span
if (i > x)
curr.push_back( make_vector( prev_range.x, prev_range.y, coord_type(x), coord_type(i) ) );
prev_range = f_range;
}
}
// check if we still need to save one range
if (curr.size() && (curr.back().y - curr.back().x) != (prev_range.y - prev_range.x))
curr.push_back( make_vector( prev_range.x, prev_range.y, coord_type(x), coord_type(i) ) );
// swap prev and curr
nvbio::vector_view<uint4*> tmp = prev;
prev = curr;
curr = tmp;
}
// no valid match covering x
if (prev.size() == 0u)
return x;
// save the result value for later
const uint32 rightmost_base = prev.back().w;
uint32 out_n = 0;
uint32 out_i = x+1;
for (int32 i = int32(x) - 1; i >= -1; --i)
{
// backward search for MEMs
const uint8 c = i < 0 ? 4u : pattern[i]; // c > 3 if i < 0 or pattern[i] is an ambiguous base
// reset the output buffer size
curr.resize( 0 );
for (int32 j = prev.size()-1; j >= 0; --j)
{
const range_type f_range = make_vector( prev[j].x, prev[j].y );
const uint32 r_span = prev[j].w;
// search c in the FM-index
const range_type c_rank = c < 4u ?
rank( f_index, make_vector( f_range.x-1, f_range.y ), c ) :
make_vector( coord_type(0), coord_type(0) );
const range_type new_range = make_vector(
f_index.L2(c) + c_rank.x + 1,
f_index.L2(c) + c_rank.y );
// keep the hit if reaching the beginning or an ambiguous base or the intv is small enough
if (c > 3u || c_rank.y - c_rank.x < min_intv)
{
// test curr_n > 0 to make sure there are no longer matches
if (curr.size() == 0)
{
if (out_n == 0 || i+1 < out_i) // skip contained matches
{
// save the range, together with its span
const uint2 pattern_span = make_uint2( i+1, r_span );
// keep the MEM only if it is above a certain length
if (pattern_span.y - pattern_span.x >= min_span)
{
// pass all results to the delegate
handler.output( f_range, pattern_span );
out_n++;
out_i = i+1;
}
}
} // otherwise the match is contained in another longer match
}
else if (curr.size() == 0 || (new_range.y - new_range.x) != (curr.back().y - curr.back().x))
{
// save the range, together with its span
curr.push_back( make_vector(
new_range.x,
new_range.y,
coord_type( i+1 ),
coord_type( r_span ) ) );
}
}
// check whether there's no more work left
if (curr.size() == 0)
break;
// swap prev and curr
nvbio::vector_view<uint4*> tmp = prev;
prev = curr;
curr = tmp;
}
// return the right-most end of the MEMs covering x
return rightmost_base;
}
namespace mem {
// return the size of a given range
template <typename rank_type>
struct range_size
{
typedef rank_type argument_type;
typedef uint64 result_type;
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint64 operator() (const rank_type range) const { return range.range_size(); }
};
// return the string-id of a given MEM rank
template <typename rank_type>
struct rank_string_id
{
typedef rank_type argument_type;
typedef uint32 result_type;
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 operator() (const rank_type range) const { return range.z; }
};
// return the span of a given range
template <typename rank_type>
struct rank_span_size
{
typedef rank_type argument_type;
typedef uint64 result_type;
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint64 operator() (const rank_type range) const
{
return (range.w >> 16u) - (range.w & 0xFFu);
}
};
// a simple mem handler
template <typename coord_type>
struct mem_handler
{
static const uint32 MAX_SIZE = 1024;
typedef typename vector_type<coord_type,2u>::type range_type;
typedef MEMRange<coord_type> rank_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
mem_handler(const uint32 _string_id, const uint32 _max_intv) :
string_id(_string_id),
max_intv(_max_intv),
n_mems(0) {}
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void output(const range_type range, const uint2 span)
{
if (n_mems >= MAX_SIZE)
return;
// check whether the SA range is small enough
if (1u + range.y - range.x <= max_intv)
{
mems[ n_mems++ ] = rank_type(
range,
string_id,
span );
}
}
const uint32 string_id;
const uint32 max_intv;
rank_type mems[MAX_SIZE];
uint32 n_mems;
};
template <typename index_type, typename string_set_type>
struct mem_functor
{
typedef typename index_type::index_type coord_type;
typedef typename index_type::range_type range_type;
typedef MEMRange<coord_type> mem_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
mem_functor(
const index_type _f_index,
const index_type _r_index,
const string_set_type _string_set,
const uint32 _min_intv,
const uint32 _max_intv,
const uint32 _min_span,
VectorArrayView<mem_type> _mem_arrays) :
f_index ( _f_index ),
r_index ( _r_index ),
string_set ( _string_set ),
min_intv ( _min_intv ),
max_intv ( _max_intv ),
min_span ( _min_span ),
mem_arrays ( _mem_arrays ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void operator() (const uint32 string_id) const
{
// fetch the pattern
typename string_set_type::string_type pattern = string_set[ string_id ];
// compute its length
const uint32 pattern_len = nvbio::length( pattern );
// build a MEM handler
mem_handler<coord_type> handler( string_id, max_intv );
// and collect all MEMs
for (uint32 x = 0; x < pattern_len;)
{
// find MEMs covering x and move to the next uncovered position along the pattern
const uint32 y = find_kmems(
pattern_len,
pattern,
x,
f_index,
r_index,
handler,
min_intv,
min_span );
x = nvbio::max( y, x+1u );
}
// output the array of results
if (handler.n_mems)
{
mem_type* output = mem_arrays.alloc( string_id, handler.n_mems );
if (output != NULL)
{
// output in reverse order, i.e. sorted by the starting coordinate
for (uint32 i = 0; i < handler.n_mems; ++i)
output[i] = handler.mems[ handler.n_mems - i - 1u ];
}
}
}
const index_type f_index;
const index_type r_index;
const string_set_type string_set;
const uint32 min_intv;
const uint32 max_intv;
const uint32 min_span;
mutable VectorArrayView<mem_type> mem_arrays;
};
// find all MEMs covering a given base
//
// \return the right-most position covered by a MEM
//
template <typename pattern_type, typename fm_index_type, typename output_vector>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
uint32 right_kmems(
const uint32 pattern_len,
const pattern_type pattern,
const uint32 string_id,
const uint32 x,
const fm_index_type f_index,
const fm_index_type r_index,
output_vector& output,
const uint32 min_intv)
{
typedef typename fm_index_type::index_type coord_type;
typedef typename fm_index_type::range_type range_type;
typedef MEMRange<coord_type> mem_type;
// find how far can we extend right starting from x
//
// internal output
mem_type mems[1024];
vector_view<mem_type*> mems_vec( 0u, mems );
// extend forward, using the reverse index
range_type f_range = make_vector( coord_type(0u), f_index.length() );
range_type r_range = make_vector( coord_type(0u), r_index.length() );
range_type prev_range = f_range;
uint32 i;
for (i = x; i < pattern_len; ++i)
{
const uint8 c = pattern[i];
if (c > 3) // there is an N here. no match
{
prev_range = f_range;
break;
}
// search c in the FM-index
extend_forward( f_index, r_index, f_range, r_range, c );
// check if the range is too small
if (1u + f_range.y - f_range.x < min_intv)
break;
// store the range
if (f_range.y - f_range.x != prev_range.y - prev_range.x)
{
// do not add the empty span
if (i > x)
mems_vec.push_back( mem_type( prev_range, string_id, x, i ) );
prev_range = f_range;
}
}
// check if we still need to save one range
if (mems_vec.size() && (mems_vec.back().range().y - mems_vec.back().range().x) != (prev_range.y - prev_range.x))
mems_vec.push_back( mem_type( prev_range, string_id, x, i ) );
// no valid match covering x
if (mems_vec.size() == 0u)
return x;
// output them in reverse order, i.e. from largest to smallest
for (uint32 i = 0; i < mems_vec.size(); ++i)
{
mem_type mem = mems_vec[ mems_vec.size() - i - 1u ];
// add a special marker to identify the rightmost MEM covering a given position later on
if (i == 0)
mem.set_group_flag();
output.push_back( mem );
}
// return the rightmost coordinate
return mems_vec.back().span().y;
}
template <typename index_type, typename string_set_type>
struct right_mem_functor
{
typedef typename index_type::index_type coord_type;
typedef typename index_type::range_type range_type;
typedef MEMRange<coord_type> mem_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
right_mem_functor(
const index_type _f_index,
const index_type _r_index,
const string_set_type _string_set,
const uint32 _min_intv,
const uint32 _max_intv,
const uint32 _min_span,
VectorArrayView<mem_type> _mem_arrays) :
f_index ( _f_index ),
r_index ( _r_index ),
string_set ( _string_set ),
min_intv ( _min_intv ),
max_intv ( _max_intv ),
min_span ( _min_span ),
mem_arrays ( _mem_arrays ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void operator() (const uint32 string_id) const
{
// fetch the pattern
typename string_set_type::string_type pattern = string_set[ string_id ];
// compute its length
const uint32 pattern_len = nvbio::length( pattern );
// build a MEM handler
mem_type mems[1024];
vector_view<mem_type*> mems_vec( 0u, mems );
// and collect all MEMs
for (uint32 x = 0; x < pattern_len;)
{
const uint32 y = right_kmems(
pattern_len,
pattern,
string_id,
x,
f_index,
r_index,
mems_vec,
min_intv );
x = nvbio::max( y, x+1u );
}
// output the array of results
if (mems_vec.size())
{
mem_type* output = mem_arrays.alloc( string_id, mems_vec.size() );
if (output != NULL)
{
for (uint32 i = 0; i < mems_vec.size(); ++i)
output[i] = mems_vec[i];
}
}
}
const index_type f_index;
const index_type r_index;
const string_set_type string_set;
const uint32 min_intv;
const uint32 max_intv;
const uint32 min_span;
mutable VectorArrayView<mem_type> mem_arrays;
};
template <typename index_type, typename string_set_type>
struct split_mem_functor
{
typedef typename index_type::index_type coord_type;
typedef typename index_type::range_type range_type;
typedef MEMRange<coord_type> mem_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
split_mem_functor(
const index_type _f_index,
const index_type _r_index,
const string_set_type _string_set,
const uint32 _split_len,
const uint32 _split_width,
const uint32 _min_intv,
const uint32 _max_intv,
const uint32 _min_span,
VectorArrayView<mem_type> _in_mem_arrays,
VectorArrayView<mem_type> _out_mem_arrays) :
f_index ( _f_index ),
r_index ( _r_index ),
string_set ( _string_set ),
split_len ( _split_len ),
split_width ( _split_width ),
min_intv ( _min_intv ),
max_intv ( _max_intv ),
min_span ( _min_span ),
in_mem_arrays ( _in_mem_arrays ),
out_mem_arrays ( _out_mem_arrays ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void operator() (const uint32 string_id) const
{
// fetch the pattern
typename string_set_type::string_type pattern = string_set[ string_id ];
// compute its length
const uint32 pattern_len = nvbio::length( pattern );
const uint32 n_mems = in_mem_arrays.size( string_id );
const mem_type* in_mems = in_mem_arrays[ string_id ];
// build a MEM handler
mem_type mems[2048];
vector_view<mem_type*> mems_vec( 0u, mems );
// and collect all MEMs
#if 0
for (uint32 group_begin = 0; group_begin < n_mems;)
{
// find the end of the group as well as the longest MEM in the group
uint32 group_end;
uint32 max_i = 0u;
uint32 max = 0u;
for (group_end = group_begin; group_end < n_mems; ++group_end)
{
const mem_type mem = in_mems[ group_end ];
if (max <= mem.span().y - mem.span().x)
{
max = mem.span().y - mem.span().x;
max_i = group_end;
}
if (group_end > group_begin && mem.group_flag())
break;
}
// process all MEMs in the group splitting the longest one
for (uint32 i = group_begin; i < group_end; ++i)
{
const mem_type mem = in_mems[i];
const uint32 n_occ = 1u + mem.range().y - mem.range().x;
if (i == max_i &&
mem.span().y - mem.span().x >= split_len &&
n_occ <= split_width)
{
const uint32 y = right_kmems(
pattern_len,
pattern,
string_id,
(mem.span().x + mem.span().y) / 2u,
f_index,
r_index,
mems_vec,
n_occ+1u );
}
else
mems_vec.push_back( mem );
}
// advance to the next group
group_begin = group_end;
}
#else
for (uint32 i = 0; i < n_mems; ++i)
{
const mem_type mem = in_mems[i];
if (mem.span().y - mem.span().x >= split_len &&
mem.range_size() <= split_width)
{
const uint32 y = right_kmems(
pattern_len,
pattern,
string_id,
(mem.span().x + mem.span().y) / 2u,
f_index,
r_index,
mems_vec,
mem.range_size() + 1u );
}
else
mems_vec.push_back( mem );
}
#endif
// output the array of results
if (mems_vec.size())
{
mem_type* output = out_mem_arrays.alloc( string_id, mems_vec.size() );
if (output != NULL)
{
for (uint32 i = 0; i < mems_vec.size(); ++i)
output[i] = mems_vec[i];
}
}
}
const index_type f_index;
const index_type r_index;
const string_set_type string_set;
const uint32 split_len;
const uint32 split_width;
const uint32 min_intv;
const uint32 max_intv;
const uint32 min_span;
const VectorArrayView<mem_type> in_mem_arrays;
mutable VectorArrayView<mem_type> out_mem_arrays;
};
// A functor to extend a MEM to the left
//
template <typename index_type, typename string_set_type>
struct left_mem_functor
{
typedef typename index_type::index_type coord_type;
typedef typename index_type::range_type range_type;
typedef MEMRange<coord_type> mem_type;
// functor typedefs
typedef mem_type argument_type;
typedef mem_type result_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
left_mem_functor(
const index_type _f_index,
const index_type _r_index,
const string_set_type _string_set,
const uint32 _min_intv,
const uint32 _max_intv,
const uint32 _min_span) :
f_index ( _f_index ),
r_index ( _r_index ),
string_set ( _string_set ),
min_intv ( _min_intv ),
max_intv ( _max_intv ),
min_span ( _min_span ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
mem_type operator() (const mem_type mem) const
{
range_type f_range = mem.range();
const uint32 string_id = mem.string_id();
const uint32 x = mem.span().x;
const uint32 r = mem.span().y;
// fetch the pattern
typename string_set_type::string_type pattern = string_set[ string_id ];
// extend from x to the left as much possible
const index_type& index = f_index;
int32 l;
for (l = int32(x) - 1; l >= 0; --l)
{
const uint8 c = pattern[l];
if (c > 3) // there is an N here. no match
break;
// search c in the FM-index
const range_type c_rank = rank(
index,
make_vector( f_range.x-1, f_range.y ),
c );
const range_type new_range = make_vector(
index.L2(c) + c_rank.x + 1,
index.L2(c) + c_rank.y );
// stop if the range became too small
if (1u + new_range.y - new_range.x < min_intv)
break;
// update the range
f_range = new_range;
}
return mem_type(
f_range,
string_id,
make_uint2( l+1, r ),
mem.group_flag() );
}
const index_type f_index;
const index_type r_index;
const string_set_type string_set;
const uint32 min_intv;
const uint32 max_intv;
const uint32 min_span;
};
template <typename coord_type>
struct filter_results
{
typedef typename vector_type<coord_type,2u>::type range_type;
typedef MEMRange<coord_type> rank_type;
typedef MEMHit<coord_type> mem_type;
typedef rank_type argument_type;
typedef mem_type result_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
filter_results(
const uint32 _n_ranges,
const uint64* _slots,
const rank_type* _ranges) :
n_ranges ( _n_ranges ),
slots ( _slots ),
ranges ( _ranges ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
mem_type operator() (const uint64 output_index) const
{
// find the text q-gram slot corresponding to this output index
const uint32 slot = uint32( upper_bound(
output_index,
slots,
n_ranges ) - slots );
// locate the hit position
const rank_type range = ranges[ slot ];
const uint64 base_slot = slot ? slots[ slot-1 ] : 0u;
const uint32 local_index = output_index - base_slot;
// and write out the MEM occurrence
return mem_type(
make_vector(
coord_type( range.range().x + local_index ), // SA coordinate for this occurrence
coord_type( 0u ), // unused
coord_type( range.string_id() ), // string-id
range.coords.w ) ); // packed pattern span
}
const uint32 n_ranges;
const uint64* slots;
const rank_type* ranges;
};
template <typename index_type>
struct locate_ssa_results
{
typedef typename index_type::index_type coord_type;
typedef typename index_type::range_type range_type;
typedef MEMHit<coord_type> mem_type;
typedef mem_type argument_type;
typedef mem_type result_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
locate_ssa_results(const index_type _index) : index( _index ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
mem_type operator() (const mem_type mem) const
{
const range_type r = locate_ssa_iterator( index, mem.coords.x );
return mem_type(
make_vector(
coord_type( r.x ),
coord_type( r.y ),
mem.coords.z,
mem.coords.w ) );
}
const index_type index;
};
template <typename index_type>
struct lookup_ssa_results
{
typedef typename index_type::index_type coord_type;
typedef typename index_type::range_type range_type;
typedef MEMHit<coord_type> mem_type;
typedef mem_type argument_type;
typedef mem_type result_type;
// constructor
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
lookup_ssa_results(const index_type _index) : index( _index ) {}
// functor operator
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
mem_type operator() (const mem_type mem) const
{
const coord_type loc = lookup_ssa_iterator( index, make_vector( mem.coords.x, mem.coords.y ) );
return mem_type(
loc,
uint32( mem.coords.z ),
uint32( mem.coords.w ) & 0xFFu,
uint32( mem.coords.w ) >> 16u );
}
const index_type index;
};
// device kernel to reorder a vector array of mem-ranges
template <typename rank_type>
__global__
void discard_ranges_kernel(
const uint32 n_items, // # of input items
VectorArrayView<rank_type> ranges, // input vector array
const uint32 max_intv, // max SA interval size
const uint32 min_span, // minimum pattern span
const uint32 reverse) // reverse the output
{
const uint32 i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n_items)
return;
// fetch the i-th vector and discard any MEMs which don't pass our search criteria
const uint32 vec_size = ranges.size( i );
rank_type* vec = ranges[i];
// keep track of the leftmost MEM coordinate
uint32 leftmost_coordinate = uint32(-1);
uint32 group_begin = 0u;
uint32 n_output = 0u;
for (uint32 j = 0; j < vec_size; ++j)
{
rank_type rank = vec[j];
// check the special marker to see if this is the initial MEM for a group
if (rank.group_flag())
{
// reverse the order of the MEMs in the previous group, so so as to sort them by left coordinate
if (reverse)
{
for (uint32 k = 0; k < (n_output - group_begin)/2; ++k)
{
const rank_type tmp = vec[ group_begin + k ];
vec[ group_begin + k ] = vec[ n_output - k - 1u ];
vec[ n_output - k - 1u ] = tmp;
}
}
// reset the leftmost MEM coordinate
leftmost_coordinate = uint32(-1);
// reset the group start
group_begin = n_output;
}
const uint2 range = rank.range();
const uint2 span = rank.span();
// check whether we want to keep this MEM rank
if (span.x < leftmost_coordinate && // make sure it's not contained in a previous MEM
span.y - span.x >= min_span && // make sure its span is long enough
1u + range.y - range.x <= max_intv) // make sure it doesn't have too many occurrences
{
// make sure the first output entry in each group has the group flag set
if (leftmost_coordinate == uint32(-1))
rank.set_group_flag();
// output this entry
vec[ n_output++ ] = rank;
// update the leftmost coordinate
leftmost_coordinate = span.x;
}
}
// write out the new vector size
ranges.m_sizes[i] = n_output;
}
// device function to reorder a vector array of mem-ranges
template <typename rank_type>
void discard_ranges(
const device_tag system_tag, // system tag
const uint32 n_items, // # of input items
DeviceVectorArray<rank_type>& ranges, // input vector array
const uint32 max_intv, // max SA interval size
const uint32 min_span, // minimum pattern span
const bool reverse) // reverse the output
{
const uint32 block_dim = 128;
const uint32 n_blocks = util::divide_ri( n_items, block_dim );
discard_ranges_kernel<<<n_blocks,block_dim>>>(
n_items,
nvbio::plain_view( ranges ),
max_intv,
min_span,
reverse );
// reset the pool size
{
thrust::device_vector<uint8> d_temp_storage;
// compute the number of output MEM ranges
// NOTE: this reduction is necessary as discard_ranges might have changed the
// number of ranges effectively in use
const uint32 n_ranges = cuda::reduce(
n_items,
ranges.m_sizes.begin(),
thrust::plus<uint32>(),
d_temp_storage );
// reset the pool size
ranges.m_pool[0] = n_ranges;
}
}
// copy the array of ranges bound to index i into the proper position of the output
template <typename rank_type>
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
void copy_ranges(
const uint32 i, // vector index to copy
const VectorArrayView<rank_type> in_ranges, // input vector array
const uint32* slots, // output slots
vector_view<rank_type*,uint64> out_ranges) // output arena
{
const uint32 slot = slots[i];
const uint32 n_src = in_ranges.size( i );
const rank_type* src = in_ranges[i];
rank_type* dst = out_ranges + slot;
for (uint32 j = 0; j < n_src; ++j)
dst[j] = src[j];
}
// device kernel to reorder a vector array of mem-ranges
template <typename rank_type>
__global__
void reorder_ranges_kernel(
const uint32 n_items, // # of input items
const VectorArrayView<rank_type> in_ranges, // input vector array
const uint32* slots, // output slots
vector_view<rank_type*,uint64> out_ranges) // output arena
{
const uint32 i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n_items)
return;
copy_ranges( i, in_ranges, slots, out_ranges );
}
// device function to reorder a vector array of mem-ranges
template <typename rank_type>
void reorder_ranges(
const device_tag system_tag, // system tag
const uint32 n_items, // # of input items
const VectorArrayView<rank_type> in_ranges, // input vector array
const uint32* slots, // output slots
vector_view<rank_type*,uint64> out_ranges) // output arena
{
const uint32 block_dim = 128;
const uint32 n_blocks = util::divide_ri( n_items, block_dim );
reorder_ranges_kernel<<<n_blocks,block_dim>>>(
n_items,
in_ranges,
slots,
out_ranges );
}
// host function to reorder a vector array of mem-ranges
template <typename rank_type>
void reorder_ranges(
const host_tag system_tag, // system tag
const uint32 n_items, // # of input items
const VectorArrayView<rank_type> in_ranges, // input vector array
const uint32* slots, // output slots
vector_view<rank_type*,uint64> out_ranges) // output arena
{
#pragma omp parallel for
for (int i = 0; i < int( n_items ); ++i)
copy_ranges( i, in_ranges, slots, out_ranges );
}
} // namespace mem
// enact the filter on an FM-index and a string-set
//
// \param fm_index the FM-index
// \param string-set the query string-set
//
// \return the total number of hits
//
template <typename fm_index_type>
template <typename string_set_type>
uint64 MEMFilter<host_tag, fm_index_type>::rank(
const fm_index_type& f_index,
const fm_index_type& r_index,
const string_set_type& string_set,
const uint32 min_intv,
const uint32 max_intv,
const uint32 min_span,
const uint32 split_len,
const uint32 split_width)
{
// save the query
m_n_queries = string_set.size();
m_f_index = f_index;
m_r_index = r_index;
m_n_occurrences = 0;
const uint32 max_string_length = 256; // TODO: compute this
m_mem_ranges.resize( m_n_queries, max_string_length * m_n_queries );
// search the strings in the index, obtaining a set of ranges
thrust::for_each(
thrust::make_counting_iterator<uint32>(0u),
thrust::make_counting_iterator<uint32>(0u) + m_n_queries,
mem::mem_functor<fm_index_type,string_set_type>(
m_f_index,
m_r_index,
string_set,
min_intv,
max_intv,
min_span,
nvbio::plain_view( m_mem_ranges ) )
);
// fetch the number of output MEM ranges
const uint32 n_ranges = m_mem_ranges.allocated_size();
// reserve enough storage for the ranges
m_slots.resize( n_ranges );
if (n_ranges)
{
// reorder the arena by string-id
thrust::host_vector<rank_type> arena( n_ranges );
thrust::host_vector<uint32> slots( m_n_queries );
// scan the mem-range array sizes to get the new array slots
thrust::exclusive_scan(
m_mem_ranges.m_sizes.begin(),
m_mem_ranges.m_sizes.begin() + m_n_queries,
slots.begin() );
// and put everything in place
mem::reorder_ranges(
host_tag(),
m_n_queries,
nvbio::plain_view( m_mem_ranges ),
nvbio::plain_view( slots ),
nvbio::plain_view( arena ) );
// swap the new array indices and the arena
m_mem_ranges.m_index.swap( slots );
m_mem_ranges.m_arena.swap( arena );
// and now scan the range sizes
thrust::inclusive_scan(
thrust::make_transform_iterator( m_mem_ranges.m_arena.begin(), mem::range_size<rank_type>() ),
thrust::make_transform_iterator( m_mem_ranges.m_arena.begin(), mem::range_size<rank_type>() ) + n_ranges,
m_slots.begin() );
// fetch the total number of MEMs
m_n_occurrences = m_slots[ n_ranges - 1u ];
}
return m_n_occurrences;
}
// find the starting position for the MEM ranks corresponding to a given string
//
template <typename fm_index_type>
uint32 MEMFilter<host_tag, fm_index_type>::first_hit(const uint32 string_id) const
{
// fetch the total number of MEM ranges
if (string_id >= m_n_queries)
return m_n_occurrences;
const uint32 first_rank = m_mem_ranges.m_index[ string_id ];
// and find the corresponding MEM hits start
return first_rank ? m_slots[ first_rank-1u ] : 0u;
}
// enumerate all mems in a given range
//
// \tparam mems_iterator a mem_type iterator
//
// \param begin the beginning of the mems sequence to locate, in [0,n_mems)
// \param end the end of the mems sequence to locate, in [0,n_mems]
//
template <typename fm_index_type>
template <typename mems_iterator>
void MEMFilter<host_tag, fm_index_type>::locate(
const uint64 begin,
const uint64 end,
mems_iterator mems)
{
const uint32 n_hits = end - begin;
// fetch the number of output MEM ranges
const uint32 n_ranges = m_mem_ranges.allocated_size();
// fill the output hits with (SA,string-id) coordinates
thrust::transform(
thrust::make_counting_iterator<uint64>(0u) + begin,
thrust::make_counting_iterator<uint64>(0u) + end,
mems,
mem::filter_results<coord_type>(
n_ranges,
nvbio::plain_view( m_slots ),
nvbio::plain_view( m_mem_ranges.m_arena ) ) );
// locate the SSA iterators
thrust::transform(
mems,
mems + n_hits,
mems,
mem::locate_ssa_results<fm_index_type>( m_f_index ) );
// and perform the final lookup
thrust::transform(
mems,
mems + n_hits,
mems,
mem::lookup_ssa_results<fm_index_type>( m_f_index ) );
}
// enact the filter on an FM-index and a string-set
//
// \param fm_index the FM-index
// \param string-set the query string-set
//
// \return the total number of hits
//
template <typename fm_index_type>
template <typename string_set_type>
uint64 MEMFilter<device_tag, fm_index_type>::rank(
const fm_index_type& f_index,
const fm_index_type& r_index,
const string_set_type& string_set,
const uint32 min_intv,
const uint32 max_intv,
const uint32 min_span,
const uint32 split_len,
const uint32 split_width)
{
// save the query
m_n_queries = string_set.size();
m_f_index = f_index;
m_r_index = r_index;
m_n_occurrences = 0;
const uint32 max_string_length = 256; // TODO: compute this
m_mem_ranges.resize( m_n_queries, max_string_length * m_n_queries );
// search the strings in the index, obtaining a set of ranges
{
thrust::for_each(
thrust::make_counting_iterator<uint32>(0u),
thrust::make_counting_iterator<uint32>(0u) + m_n_queries,
mem::right_mem_functor<fm_index_type,string_set_type>(
m_f_index,
m_r_index,
string_set,
min_intv,
max_intv,
min_span,
nvbio::plain_view( m_mem_ranges ) )
);
// fetch the number of output MEM ranges
const uint32 n_ranges = m_mem_ranges.allocated_size();
// extend them left
thrust::transform(
m_mem_ranges.m_arena.begin(),
m_mem_ranges.m_arena.begin() + n_ranges,
m_mem_ranges.m_arena.begin(),
mem::left_mem_functor<fm_index_type,string_set_type>(
m_f_index,
m_r_index,
string_set,
min_intv,
max_intv,
min_span )
);
const bool do_split = split_len < uint32(-1);
// and discard MEM ranges we don't want to keep
mem::discard_ranges(
device_tag(),
m_n_queries,
m_mem_ranges,
max_intv,
min_span,
do_split ? false : true );
// split long MEMs
if (do_split)
{
DeviceVectorArray<rank_type> unsplit_mem_ranges;
m_mem_ranges.swap( unsplit_mem_ranges );
m_mem_ranges.resize( m_n_queries, max_string_length * m_n_queries );
thrust::for_each(
thrust::make_counting_iterator<uint32>(0u),
thrust::make_counting_iterator<uint32>(0u) + m_n_queries,
mem::split_mem_functor<fm_index_type,string_set_type>(
m_f_index,
m_r_index,
string_set,
split_len,
split_width,
min_intv,
max_intv,
min_span,
nvbio::plain_view( unsplit_mem_ranges ),
nvbio::plain_view( m_mem_ranges ) )
);
// fetch the number of output MEM ranges
const uint32 n_ranges = m_mem_ranges.allocated_size();
// extend them left
thrust::transform(
m_mem_ranges.m_arena.begin(),
m_mem_ranges.m_arena.begin() + n_ranges,
m_mem_ranges.m_arena.begin(),
mem::left_mem_functor<fm_index_type,string_set_type>(
m_f_index,
m_r_index,
string_set,
min_intv,
max_intv,
min_span )
);
// and discard MEM ranges we don't want to keep
mem::discard_ranges(
device_tag(),
m_n_queries,
m_mem_ranges,
max_intv,
min_span,
true );
}
}
// fetch the number of MEM ranges
const uint32 n_ranges = m_mem_ranges.allocated_size();
// reserve enough storage for the ranges
m_slots.resize( n_ranges );
if (n_ranges)
{
// reorder the arena by string-id
thrust::device_vector<rank_type> arena( n_ranges );
thrust::device_vector<uint32> slots( m_n_queries );
// scan the mem-range array sizes to get the new array slots
cuda::exclusive_scan(
m_n_queries,
m_mem_ranges.m_sizes.begin(),
slots.begin(),
thrust::plus<uint32>(),
0u,
d_temp_storage );
// and put everything in place
mem::reorder_ranges(
device_tag(),
m_n_queries,
nvbio::plain_view( m_mem_ranges ),
nvbio::plain_view( slots ),
nvbio::plain_view( arena ) );
// swap the new array indices and the arena
m_mem_ranges.m_index.swap( slots );
m_mem_ranges.m_arena.swap( arena );
// and now scan the range sizes
cuda::inclusive_scan(
n_ranges,
thrust::make_transform_iterator( m_mem_ranges.m_arena.begin(), mem::range_size<rank_type>() ),
m_slots.begin(),
thrust::plus<uint64>(),
d_temp_storage );
// fetch the total number of MEMs
m_n_occurrences = m_slots[ n_ranges - 1u ];
}
return m_n_occurrences;
}
// find the starting position for the MEM ranks corresponding to a given string
//
template <typename fm_index_type>
uint32 MEMFilter<device_tag, fm_index_type>::first_hit(const uint32 string_id) const
{
// fetch the total number of MEM ranges
if (string_id >= m_n_queries)
return m_n_occurrences;
const uint32 first_rank = m_mem_ranges.m_index[ string_id ];
// and find the corresponding MEM hits start
return first_rank ? m_slots[ first_rank-1u ] : 0u;
}
// enumerate all mems in a given range
//
// \tparam mems_iterator a mem_type iterator
//
// \param begin the beginning of the mems sequence to locate, in [0,n_mems)
// \param end the end of the mems sequence to locate, in [0,n_mems]
//
template <typename fm_index_type>
template <typename mems_iterator>
void MEMFilter<device_tag, fm_index_type>::locate(
const uint64 begin,
const uint64 end,
mems_iterator mems)
{
const uint32 n_hits = end - begin;
// fetch the number of output MEM ranges
const uint32 n_ranges = m_mem_ranges.allocated_size();
// fill the output hits with (SA,string-id) coordinates
thrust::transform(
thrust::make_counting_iterator<uint64>(0u) + begin,
thrust::make_counting_iterator<uint64>(0u) + end,
device_iterator( mems ),
mem::filter_results<coord_type>(
n_ranges,
nvbio::plain_view( m_slots ),
nvbio::plain_view( m_mem_ranges.m_arena ) ) );
// locate the SSA iterators
thrust::transform(
device_iterator( mems ),
device_iterator( mems ) + n_hits,
device_iterator( mems ),
mem::locate_ssa_results<fm_index_type>( m_f_index ) );
// and perform the final lookup
thrust::transform(
device_iterator( mems ),
device_iterator( mems ) + n_hits,
device_iterator( mems ),
mem::lookup_ssa_results<fm_index_type>( m_f_index ) );
}
// find the index i of the furthermost string such that filter.first_hit( j ) <= mem_count for each j < i
//
template <typename system_tag, typename fm_index_type>
uint32 string_batch_bound(const MEMFilter<system_tag, fm_index_type>& filter, const uint32 mem_count)
{
return uint32( thrust::upper_bound(
thrust::make_permutation_iterator(
filter.m_slots.begin(),
filter.m_mem_ranges.m_index.begin() ),
thrust::make_permutation_iterator(
filter.m_slots.begin(),
filter.m_mem_ranges.m_index.begin() ) + filter.m_n_queries,
mem_count ) - thrust::make_permutation_iterator(
filter.m_slots.begin(),
filter.m_mem_ranges.m_index.begin() ) );
}
} // namespace nvbio
|
Example_cancellation.2.c | /*
* @@name: cancellation.2c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
#include <stddef.h>
typedef struct binary_tree_s {
int value;
struct binary_tree_s *left, *right;
} binary_tree_t;
binary_tree_t *search_tree(binary_tree_t *tree, int value, int level) {
binary_tree_t *found = NULL;
if (tree) {
if (tree->value == value) {
found = tree;
}
else {
#pragma omp task shared(found) if(level < 10)
{
binary_tree_t *found_left = NULL;
found_left = search_tree(tree->left, value, level + 1);
if (found_left) {
#pragma omp atomic write
found = found_left;
#pragma omp cancel taskgroup
}
}
#pragma omp task shared(found) if(level < 10)
{
binary_tree_t *found_right = NULL;
found_right = search_tree(tree->right, value, level + 1);
if (found_right) {
#pragma omp atomic write
found = found_right;
#pragma omp cancel taskgroup
}
}
#pragma omp taskwait
}
}
return found;
}
binary_tree_t *search_tree_parallel(binary_tree_t *tree, int value) {
binary_tree_t *found = NULL;
#pragma omp parallel shared(found, tree, value)
{
#pragma omp master
{
#pragma omp taskgroup
{
found = search_tree(tree, value, 0);
}
}
}
return found;
}
|
GB_binop__isgt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_fp32)
// A*D function (colscale): GB (_AxD__isgt_fp32)
// D*A function (rowscale): GB (_DxB__isgt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_fp32)
// C=scalar+B GB (_bind1st__isgt_fp32)
// C=scalar+B' GB (_bind1st_tran__isgt_fp32)
// C=A+scalar GB (_bind2nd__isgt_fp32)
// C=A'+scalar GB (_bind2nd_tran__isgt_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_FP32 || GxB_NO_ISGT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isgt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
is.c | /*************************************************************************
* *
* N A S P A R A L L E L B E N C H M A R K S 3.3 *
* *
* O p e n M P V E R S I O N *
* *
* I S *
* *
*************************************************************************
* *
* This benchmark is an OpenMP version of the NPB IS code. *
* It is described in NAS Technical Report 99-011. *
* *
* Permission to use, copy, distribute and modify this software *
* for any purpose with or without fee is hereby granted. We *
* request, however, that all derived work reference the NAS *
* Parallel Benchmarks 3.3. This software is provided "as is" *
* without express or implied warranty. *
* *
* Information on NPB 3.3, including the technical report, the *
* original specifications, source code, results and information *
* on how to submit new results, is available at: *
* *
* http://www.nas.nasa.gov/Software/NPB/ *
* *
* Send comments or suggestions to npb@nas.nasa.gov *
* *
* NAS Parallel Benchmarks Group *
* NASA Ames Research Center *
* Mail Stop: T27A-1 *
* Moffett Field, CA 94035-1000 *
* *
* E-mail: npb@nas.nasa.gov *
* Fax: (650) 604-3957 *
* *
*************************************************************************
* *
* Author: M. Yarrow *
* H. Jin *
* *
*************************************************************************/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "../my_include/my_include.h"
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* To disable the use of buckets, comment out the following line */
#define USE_BUCKETS
/* Uncomment below for cyclic schedule */
/*#define SCHED_CYCLIC*/
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS D */
/*************/
#if CLASS == 'D'
#define TOTAL_KEYS_LOG_2 31
#define MAX_KEY_LOG_2 27
#define NUM_BUCKETS_LOG_2 10
#endif
#if CLASS == 'D'
#define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2)
#else
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#endif
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
#if CLASS == 'D'
typedef long INT_TYPE;
#else
typedef int INT_TYPE;
#endif
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[MAX_KEY],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE],
**key_buff1_aptr = NULL;
#ifdef USE_BUCKETS
//INT_TYPE **bucket_size,
INT_TYPE bucket_ptrs[NUM_BUCKETS];
//bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS);
INT_TYPE bucket_size[1][NUM_BUCKETS];
#pragma omp threadprivate(bucket_ptrs)
#endif
//kai
int flag1 = 0, flag2 = 0, flag3 = 0, flag4 = 0, flag5= 0, flag6 = 0, flag7 = 0, flag8 = 0;
//INT_TYPE *kw_bucket_size;
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895},
D_test_index_array[TEST_ARRAY_SIZE] =
{1317351170,995930646,1157283250,1503301535,1453734525},
D_test_rank_array[TEST_ARRAY_SIZE] =
{1,36538729,1978098519,2145192618,2147425337};
/***********************/
/* function prototypes */
/***********************/
double randlc( double *X, double *A );
void full_verify( void );
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
static int KS=0;
static double R23, R46, T23, T46;
#pragma omp threadprivate(KS, R23, R46, T23, T46)
double randlc( double *X, double *A )
{
double T1, T2, T3, T4;
double A1;
double A2;
double X1;
double X2;
double Z;
int i, j;
if (KS == 0)
{
R23 = 1.0;
R46 = 1.0;
T23 = 1.0;
T46 = 1.0;
for (i=1; i<=23; i++)
{
R23 = 0.50 * R23;
T23 = 2.0 * T23;
}
for (i=1; i<=46; i++)
{
R46 = 0.50 * R46;
T46 = 2.0 * T46;
}
KS = 1;
}
/* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */
T1 = R23 * *A;
j = T1;
A1 = j;
A2 = *A - T23 * A1;
/* Break X into two parts such that X = 2^23 * X1 + X2, compute
Z = A1 * X2 + A2 * X1 (mod 2^23), and then
X = 2^23 * Z + A2 * X2 (mod 2^46). */
T1 = R23 * *X;
j = T1;
X1 = j;
X2 = *X - T23 * X1;
T1 = A1 * X2 + A2 * X1;
j = R23 * T1;
T2 = j;
Z = T1 - T23 * T2;
T3 = T23 * Z + A2 * X2;
j = R46 * T3;
T4 = j;
*X = T3 - T46 * T4;
return(R46 * *X);
}
/*****************************************************************/
/************ F I N D _ M Y _ S E E D ************/
/************ ************/
/************ returns parallel random number seq seed ************/
/*****************************************************************/
/*
* Create a random number sequence of total length nn residing
* on np number of processors. Each processor will therefore have a
* subsequence of length nn/np. This routine returns that random
* number which is the first random number for the subsequence belonging
* to processor rank kn, and which is used as seed for proc kn ran # gen.
*/
double find_my_seed( int kn, /* my processor rank, 0<=kn<=num procs */
int np, /* np = num procs */
long nn, /* total num of ran numbers, all procs */
double s, /* Ran num seed, for ex.: 314159265.00 */
double a ) /* Ran num gen mult, try 1220703125.00 */
{
double t1,t2;
long mq,nq,kk,ik;
if ( kn == 0 ) return s;
mq = (nn/4 + np - 1) / np;
nq = mq * 4 * kn; /* number of rans to be skipped */
t1 = s;
t2 = a;
kk = nq;
while ( kk > 1 ) {
ik = kk / 2;
if( 2 * ik == kk ) {
(void)randlc( &t2, &t2 );
kk = ik;
}
else {
(void)randlc( &t1, &t2 );
kk = kk - 1;
}
}
(void)randlc( &t1, &t2 );
return( t1 );
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
void create_seq( double seed, double a )
{
double x, s;
INT_TYPE i, k;
#pragma omp parallel private(x,s,i,k)
{
INT_TYPE k1, k2;
double an = a;
int myid, num_procs;
INT_TYPE mq;
#ifdef _OPENMP
myid = omp_get_thread_num();
num_procs = omp_get_num_threads();
#else
myid = 0;
num_procs = 1;
#endif
mq = (NUM_KEYS + num_procs - 1) / num_procs;
k1 = mq * myid;
k2 = k1 + mq;
if ( k2 > NUM_KEYS ) k2 = NUM_KEYS;
KS = 0;
s = find_my_seed( myid, num_procs,
(long)4*NUM_KEYS, seed, an );
k = MAX_KEY/4;
for (i=k1; i<k2; i++)
{
x = randlc(&s, &an);
x += randlc(&s, &an);
x += randlc(&s, &an);
x += randlc(&s, &an);
key_array[i] = k*x;
}
} /*omp parallel*/
}
/*****************************************************************/
/***************** Allocate Working Buffer ****************/
/*****************************************************************/
void *alloc_mem( size_t size )
{
void *p;
p = (void *)malloc(size);
if (!p) {
perror("Memory allocation error");
exit(1);
}
return p;
}
void alloc_key_buff( void )
{
INT_TYPE i;
int num_procs;
#ifdef _OPENMP
num_procs = omp_get_max_threads();
#else
num_procs = 1;
#endif
#ifdef USE_BUCKETS
/* kw_bucket_size = (INT_TYPE*)alloc_mem(sizeof(INT)*NUM_BUCKETS*num_procs);
//bucket_size = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs);
for (i = 0; i < num_procs; i++) {
//bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS);
}
*/
#pragma omp parallel for
for( i=0; i<NUM_KEYS; i++ )
key_buff2[i] = 0;
//printf("111\n");
#else /*USE_BUCKETS*/
key_buff1_aptr = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs);
key_buff1_aptr[0] = key_buff1;
for (i = 1; i < num_procs; i++) {
key_buff1_aptr[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * MAX_KEY);
}
//printf("2222\n");
#endif /*USE_BUCKETS*/
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
void full_verify( void )
{
INT_TYPE i, j;
INT_TYPE k, k1, k2;
/* Now, finally, sort the keys: */
/* Copy keys into work array; keys in key_array will be reassigned. */
#ifdef USE_BUCKETS
/* Buckets are already sorted. Sorting keys within each bucket */
#ifdef SCHED_CYCLIC
#pragma omp parallel for private(i,j,k,k1) schedule(static,1)
#else
#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)
#endif
for( j=0; j< NUM_BUCKETS; j++ ) {
k1 = (j > 0)? bucket_ptrs[j-1] : 0;
for ( i = k1; i < bucket_ptrs[j]; i++ ) {
k = --key_buff_ptr_global[key_buff2[i]];
key_array[k] = key_buff2[i];
}
}
#else
#pragma omp parallel private(i,j,k,k1,k2)
{
#pragma omp for
for( i=0; i<NUM_KEYS; i++ )
key_buff2[i] = key_array[i];
/* This is actual sorting. Each thread is responsible for
a subset of key values */
j = omp_get_num_threads();
j = (MAX_KEY + j - 1) / j;
k1 = j * omp_get_thread_num();
k2 = k1 + j;
if (k2 > MAX_KEY) k2 = MAX_KEY;
for( i=0; i<NUM_KEYS; i++ ) {
if (key_buff2[i] >= k1 && key_buff2[i] < k2) {
k = --key_buff_ptr_global[key_buff2[i]];
key_array[k] = key_buff2[i];
}
}
} /*omp parallel*/
#endif
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j = 0;
#pragma omp parallel for reduction(+:j)
for( i=1; i<NUM_KEYS; i++ )
if( key_array[i-1] > key_array[i] )
j++;
if( j != 0 )
printf( "Full_verify: number of keys out of sort: %ld\n", (long)j );
else
passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
void rank( int iteration )
{
flag1 = -1;flag2 = -1;flag3 = -1;flag4 = 0;flag5 = -1;flag6 = -1;flag7 = -1;flag8 = -1;
INT_TYPE i, k;
INT_TYPE *key_buff_ptr, *key_buff_ptr2;
#ifdef USE_BUCKETS
int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
INT_TYPE num_bucket_keys = (1L << shift);
#endif
key_array[iteration] = iteration;
key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;
// if(iteration == 2) printf("max flag1 = %d\n",TEST_ARRAY_SIZE);
/* Determine where the partial verify test keys are, load into */
/* top of array bucket_size */
for( i=0; i<TEST_ARRAY_SIZE; i++ ) {
partial_verify_vals[i] = key_array[test_index_array[i]];
//kai
// flag1 = i;
}
/* Setup pointers to key buffers */
#ifdef USE_BUCKETS
key_buff_ptr2 = key_buff2;
#else
key_buff_ptr2 = key_array;
#endif
key_buff_ptr = key_buff1;
#pragma omp parallel private(i, k)
{
INT_TYPE *work_buff, m, k1, k2;
int myid = 0, num_procs = 1;
#ifdef _OPENMP
myid = omp_get_thread_num();
num_procs = omp_get_num_threads();
#endif
/* Bucket sort is known to improve cache performance on some */
/* cache based systems. But the actual performance may depend */
/* on cache size, problem size. */
#ifdef USE_BUCKETS
work_buff = bucket_size[myid];
/* Initialize */
for( i=0; i<NUM_BUCKETS; i++ )
work_buff[i] = 0;
// if(iteration == 2) printf("max flag2 = %d\n",NUM_KEYS);
/* Determine the number of keys in each bucket */
#pragma omp for schedule(static)
for( i=0; i<NUM_KEYS; i++ ) {
work_buff[key_array[i] >> shift]++;
//kai
//flag2 = i;
}
/* Accumulative bucket sizes are the bucket pointers.
These are global sizes accumulated upon to each bucket */
// if(iteration == 2) printf("max flag3 = %d\n",myid);
bucket_ptrs[0] = 0;
for( k=0; k< myid; k++ ) {
bucket_ptrs[0] += bucket_size[k][0];
//kai
// flag3 = k;
}
//if(iteration == 2) printf("max flag4 = %d\n",NUM_BUCKETS);
for( i=1; i< NUM_BUCKETS; i++ ) {
bucket_ptrs[i] = bucket_ptrs[i-1];
for( k=0; k< myid; k++ ) {
bucket_ptrs[i] += bucket_size[k][i];
}
for( k=myid; k< num_procs; k++ ) {
bucket_ptrs[i] += bucket_size[k][i-1];
}
//kai
flag4 = i;
}
//if(iteration == 2) printf("max flag5 = %d\n",NUM_KEYS);
/* Sort into appropriate bucket */
#pragma omp for schedule(static)
for( i=0; i<NUM_KEYS; i++ )
{
k = key_array[i];
key_buff2[bucket_ptrs[k >> shift]++] = k;
//kai
flag5 = i;
}
//if(iteration == 2) printf("max flag6 = %d\n, myid = %d, num_procs-1 = %d\n",NUM_BUCKETS,myid,num_procs-1);
/* The bucket pointers now point to the final accumulated sizes */
if (myid < num_procs-1) {
for( i=0; i< NUM_BUCKETS; i++ ) {
for( k=myid+1; k< num_procs; k++ ){
bucket_ptrs[i] += bucket_size[k][i];
}
//kai
//flag6 = i;
}
}
/* Now, buckets are sorted. We only need to sort keys inside
each bucket, which can be done in parallel. Because the distribution
of the number of keys in the buckets is Gaussian, the use of
a dynamic schedule should improve load balance, thus, performance */
//if(iteration == 2) printf("max flag7 = %d\n",NUM_BUCKETS);
#ifdef SCHED_CYCLIC
#pragma omp for schedule(static,1)
#else
#pragma omp for schedule(dynamic)
#endif
for( i=0; i< NUM_BUCKETS; i++ ) {
/* Clear the work array section associated with each bucket */
k1 = i * num_bucket_keys;
k2 = k1 + num_bucket_keys;
for ( k = k1; k < k2; k++ ) {
key_buff_ptr[k] = 0;
}
/* Ranking of all keys occurs in this section: */
/* In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population */
m = (i > 0)? bucket_ptrs[i-1] : 0;
for ( k = m; k < bucket_ptrs[i]; k++ ) {
key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */
/* population */
//kai
flag1 = k;
}
/* To obtain ranks of each key, successively add the individual key
population, not forgetting to add m, the total of lesser keys,
to the first key population */
key_buff_ptr[k1] += m;
for ( k = k1+1; k < k2; k++ ) {
key_buff_ptr[k] += key_buff_ptr[k-1];
flag2 = k;
}
//kai
flag7 = i;
}
#else /*USE_BUCKETS*/
#endif /*USE_BUCKETS*/
} /*omp parallel*/
/* This is the partial verify test section */
/* Observe that test_rank_array vals are */
/* shifted differently for different cases */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
{
k = partial_verify_vals[i]; /* test vals were put here */
if( 0 < k && k <= NUM_KEYS-1 )
{
INT_TYPE key_rank = key_buff_ptr[k-1];
int failed = 0;
switch( CLASS )
{
case 'S':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'W':
if( i < 2 )
{
if( key_rank != test_rank_array[i]+(iteration-2) )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'A':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+(iteration-1) )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-(iteration-1) )
failed = 1;
else
passed_verification++;
}
break;
case 'B':
if( i == 1 || i == 2 || i == 4 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'C':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'D':
if( i < 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
}
if( failed == 1 )
{
int ground_truth = 0;
if( i <= 2 )
{
ground_truth = test_rank_array[i]+(iteration-1);
}
else
{
ground_truth = key_rank != test_rank_array[i]-(iteration-1);
}
printf( "Failed partial verification: "
"iteration %d, test key %d, our result %d, ground truth is %d\n",
iteration, (int)i,key_rank,ground_truth );
}
}
//kai
//flag8 = i;
}
/* Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
since they cannot be made register by compiler */
if( iteration == MAX_ITERATIONS )
key_buff_ptr_global = key_buff_ptr;
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
int main( int argc, char **argv )
{
printf("buffer size addr is %p\n",bucket_size);
printf("buffer size[0] addr is %p\n",bucket_size[0]);
//kai
crucial_data(key_array, "int", SIZE_OF_BUFFERS);
crucial_data(key_buff1, "int", MAX_KEY);
crucial_data(key_buff2, "int",SIZE_OF_BUFFERS);
crucial_data(partial_verify_vals, "int", TEST_ARRAY_SIZE);
crucial_data(bucket_ptrs, "int", NUM_BUCKETS);
crucial_data(bucket_size, "int", NUM_BUCKETS);
consistent_data(&flag1, "int", 1);
consistent_data(&flag2, "int", 1);
consistent_data(&flag3, "int", 1);
consistent_data(&flag4, "int", 1);
consistent_data(&flag5, "int", 1);
consistent_data(&flag6, "int", 1);
consistent_data(&flag7, "int", 1);
consistent_data(&flag8, "int", 1);
int i, iteration, timer_on;
double timecounter;
FILE *fp;
/* Initialize timers */
timer_on = 0;
if ((fp = fopen("timer.flag", "r")) != NULL) {
fclose(fp);
timer_on = 1;
}
timer_clear( 0 );
if (timer_on) {
timer_clear( 1 );
timer_clear( 2 );
timer_clear( 3 );
}
if (timer_on) timer_start( 3 );
/* Initialize the verification arrays if a valid class */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
switch( CLASS )
{
case 'S':
test_index_array[i] = S_test_index_array[i];
test_rank_array[i] = S_test_rank_array[i];
break;
case 'A':
test_index_array[i] = A_test_index_array[i];
test_rank_array[i] = A_test_rank_array[i];
break;
case 'W':
test_index_array[i] = W_test_index_array[i];
test_rank_array[i] = W_test_rank_array[i];
break;
case 'B':
test_index_array[i] = B_test_index_array[i];
test_rank_array[i] = B_test_rank_array[i];
break;
case 'C':
test_index_array[i] = C_test_index_array[i];
test_rank_array[i] = C_test_rank_array[i];
break;
case 'D':
test_index_array[i] = D_test_index_array[i];
test_rank_array[i] = D_test_rank_array[i];
break;
};
/* Printout initial NPB info */
printf
( "\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - IS Benchmark\n\n" );
printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS );
printf( " Iterations: %d\n", MAX_ITERATIONS );
#ifdef _OPENMP
printf( " Number of available threads: %d\n", omp_get_max_threads() );
#endif
printf( "\n" );
if (timer_on) timer_start( 1 );
/* Generate random number sequence and subsequent keys on all procs */
create_seq( 314159265.00, /* Random number gen seed */
1220703125.00 ); /* Random number gen mult */
alloc_key_buff();
if (timer_on) timer_stop( 1 );
/* Do one interation for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables */
rank( 1 );
/* Start verification counter */
passed_verification = 0;
if( CLASS != 'S' ) printf( "\n iteration\n" );
/* Start timer */
timer_start( 0 );
//kai
consistent_data(&iteration, "int", 1);
flush_whole_cache();
start_crash();
/* This is the main iteration */
for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
{
if( CLASS != 'S' ) printf( " %d\n", iteration );
rank( iteration );
}
//kai
end_crash();
/* End of timing, obtain maximum time of all processors */
timer_stop( 0 );
timecounter = timer_read( 0 );
/* This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation */
if (timer_on) timer_start( 2 );
full_verify();
if (timer_on) timer_stop( 2 );
if (timer_on) timer_stop( 3 );
/* The final printout */
if( passed_verification != 5*MAX_ITERATIONS + 1 )
passed_verification = 0;
c_print_results( "IS",
CLASS,
(int)(TOTAL_KEYS/64),
64,
0,
MAX_ITERATIONS,
timecounter,
((double) (MAX_ITERATIONS*TOTAL_KEYS))
/timecounter/1000000.,
"keys ranked",
passed_verification,
NPBVERSION,
COMPILETIME,
CC,
CLINK,
C_LIB,
C_INC,
CFLAGS,
CLINKFLAGS );
/* Print additional timers */
if (timer_on) {
double t_total, t_percent;
t_total = timer_read( 3 );
printf("\nAdditional timers -\n");
printf(" Total execution: %8.3f\n", t_total);
if (t_total == 0.0) t_total = 1.0;
timecounter = timer_read(1);
t_percent = timecounter/t_total * 100.;
printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent);
timecounter = timer_read(0);
t_percent = timecounter/t_total * 100.;
printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent);
timecounter = timer_read(2);
t_percent = timecounter/t_total * 100.;
printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent);
}
return 0;
/**************************/
} /* E N D P R O G R A M */
/**************************/
|
instruments.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file instruments.h
* \brief Measurement functions
*/
#ifndef INSTRUMENTS_H_
#define INSTRUMENTS_H_
namespace qpp {
/**
* \brief Generalized inner product
*
* \param phi Column vector Eigen expression
* \param psi Column vector Eigen expression
* \param subsys Subsystem indexes over which \a phi is defined
* \param dims Dimensions of the multi-partite system
* \return Inner product \f$\langle \phi_{subsys}|\psi\rangle\f$, as a scalar or
* column vector over the remaining Hilbert space
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
ip(const Eigen::MatrixBase<Derived>& phi, const Eigen::MatrixBase<Derived>& psi,
const std::vector<idx>& subsys, const std::vector<idx>& dims) {
const dyn_col_vect<typename Derived::Scalar>& rphi = phi.derived();
const dyn_col_vect<typename Derived::Scalar>& rpsi = psi.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rphi))
throw exception::ZeroSize("qpp::ip()");
// check zero-size
if (!internal::check_nonzero_size(rpsi))
throw exception::ZeroSize("qpp::ip()");
// check column vector
if (!internal::check_cvector(rphi))
throw exception::MatrixNotCvector("qpp::ip()");
// check column vector
if (!internal::check_cvector(rpsi))
throw exception::MatrixNotCvector("qpp::ip()");
// check that dims is a valid dimension vector
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::ip()");
// check that subsys are valid w.r.t. dims
if (!internal::check_subsys_match_dims(subsys, dims))
throw exception::SubsysMismatchDims("qpp::ip()");
// check that dims match psi column vector
if (!internal::check_dims_match_cvect(dims, rpsi))
throw exception::DimsMismatchCvector("qpp::ip()");
// check that subsys match pho column vector
std::vector<idx> subsys_dims(subsys.size());
for (idx i = 0; i < subsys.size(); ++i)
subsys_dims[i] = dims[subsys[i]];
if (!internal::check_dims_match_cvect(subsys_dims, rphi))
throw exception::DimsMismatchCvector("qpp::ip()");
// END EXCEPTION CHECKS
idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
idx D = static_cast<idx>(rpsi.rows());
idx Dsubsys_bar = D / Dsubsys;
idx n = dims.size();
idx n_subsys = subsys.size();
idx n_subsys_bar = n - n_subsys;
idx Cdims[maxn];
idx Csubsys[maxn];
idx Cdimssubsys[maxn];
idx Csubsys_bar[maxn];
idx Cdimssubsys_bar[maxn];
std::vector<idx> subsys_bar = complement(subsys, n);
std::copy(std::begin(subsys_bar), std::end(subsys_bar),
std::begin(Csubsys_bar));
for (idx i = 0; i < n; ++i) {
Cdims[i] = dims[i];
}
for (idx i = 0; i < n_subsys; ++i) {
Csubsys[i] = subsys[i];
Cdimssubsys[i] = dims[subsys[i]];
}
for (idx i = 0; i < n_subsys_bar; ++i) {
Cdimssubsys_bar[i] = dims[subsys_bar[i]];
}
auto worker = [&](idx b) noexcept->typename Derived::Scalar {
idx Cmidxrow[maxn];
idx Cmidxrowsubsys[maxn];
idx Cmidxcolsubsys_bar[maxn];
/* get the col multi-indexes of the complement */
internal::n2multiidx(b, n_subsys_bar, Cdimssubsys_bar,
Cmidxcolsubsys_bar);
/* write it in the global row multi-index */
for (idx k = 0; k < n_subsys_bar; ++k) {
Cmidxrow[Csubsys_bar[k]] = Cmidxcolsubsys_bar[k];
}
typename Derived::Scalar result = 0;
for (idx a = 0; a < Dsubsys; ++a) {
/* get the row multi-indexes of the subsys */
internal::n2multiidx(a, n_subsys, Cdimssubsys, Cmidxrowsubsys);
/* write it in the global row multi-index */
for (idx k = 0; k < n_subsys; ++k) {
Cmidxrow[Csubsys[k]] = Cmidxrowsubsys[k];
}
// compute the row index
idx i = internal::multiidx2n(Cmidxrow, n, Cdims);
result += std::conj(rphi(a)) * rpsi(i);
}
return result;
}; /* end worker */
dyn_col_vect<typename Derived::Scalar> result(Dsubsys_bar);
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
for (idx m = 0; m < Dsubsys_bar; ++m)
result(m) = worker(m);
return result;
}
/**
* \brief Generalized inner product
*
* \param phi Column vector Eigen expression
* \param psi Column vector Eigen expression
* \param subsys Subsystem indexes over which \a phi is defined
* \param d Subsystem dimensions
* \return Inner product \f$\langle \phi_{subsys}|\psi\rangle\f$, as a scalar or
* column vector over the remaining Hilbert space
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
ip(const Eigen::MatrixBase<Derived>& phi, const Eigen::MatrixBase<Derived>& psi,
const std::vector<idx>& subsys, idx d = 2) {
const dyn_col_vect<typename Derived::Scalar>& rphi = phi.derived();
const dyn_col_vect<typename Derived::Scalar>& rpsi = psi.derived();
// EXCEPTION CHECKS
if (!internal::check_nonzero_size(rpsi))
throw exception::ZeroSize("qpp::ip()");
// check valid dims
if (d < 2)
throw exception::DimsInvalid("qpp::ip()");
// END EXCEPTION CHECKS
idx n = internal::get_num_subsys(static_cast<idx>(rpsi.rows()), d);
std::vector<idx> dims(n, d); // local dimensions vector
return ip(phi, psi, subsys, dims);
}
// full measurements
/**
* \brief Measures the state vector or density operator \a A using the set of
* Kraus operators \a Ks
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks) {
const dyn_mat<typename Derived::Scalar>& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check the Kraus operators
if (Ks.empty())
throw exception::ZeroSize("qpp::measure()");
if (!internal::check_square_mat(Ks[0]))
throw exception::MatrixNotSquare("qpp::measure()");
if (Ks[0].rows() != rA.rows())
throw exception::DimsMismatchMatrix("qpp::measure()");
for (auto&& elem : Ks)
if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
throw exception::DimsNotEqual("qpp::measure()");
// END EXCEPTION CHECKS
// probabilities
std::vector<double> prob(Ks.size());
// resulting states
std::vector<cmat> outstates(Ks.size());
//************ density matrix ************//
if (internal::check_square_mat(rA)) // square matrix
{
for (idx i = 0; i < Ks.size(); ++i) {
outstates[i] = cmat::Zero(rA.rows(), rA.rows());
cmat tmp = Ks[i] * rA * adjoint(Ks[i]); // un-normalized;
prob[i] = std::abs(trace(tmp)); // probability
if (prob[i] > 0)
outstates[i] = tmp / prob[i]; // normalized
}
}
//************ ket ************//
else if (internal::check_cvector(rA)) // column vector
{
for (idx i = 0; i < Ks.size(); ++i) {
outstates[i] = ket::Zero(rA.rows());
ket tmp = Ks[i] * rA; // un-normalized;
// probability
prob[i] = std::pow(norm(tmp), 2);
if (prob[i] > 0)
outstates[i] = tmp / std::sqrt(prob[i]); // normalized
}
} else
throw exception::MatrixNotSquareNorCvector("qpp::measure()");
// sample from the probability distribution
std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
idx result = dd(gen);
return std::make_tuple(result, prob, outstates);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the state vector or density matrix \a A using the set of
* Kraus operators \a Ks
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
const std::initializer_list<cmat>& Ks) {
return measure(A, std::vector<cmat>(Ks));
}
/**
* \brief Measures the state vector or density matrix \a A in the orthonormal
* basis specified by the unitary matrix \a U
*
* \param A Eigen expression
* \param U Unitary matrix whose columns represent the measurement basis vectors
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& U) {
const dyn_mat<typename Derived::Scalar>& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check the unitary basis matrix U
if (!internal::check_nonzero_size(U))
throw exception::ZeroSize("qpp::measure()");
if (!internal::check_square_mat(U))
throw exception::MatrixNotSquare("qpp::measure()");
if (U.rows() != rA.rows())
throw exception::DimsMismatchMatrix("qpp::measure()");
// END EXCEPTION CHECKS
std::vector<cmat> Ks(U.rows());
for (idx i = 0; i < static_cast<idx>(U.rows()); ++i)
Ks[i] = U.col(i) * adjoint(U.col(i));
return measure(rA, Ks);
}
// partial measurements
/**
* \brief Measures the part \a subsys of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
const std::vector<idx>& target, const std::vector<idx>& dims,
bool destructive = true) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check that dimension is valid
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::measure()");
// check that target is valid w.r.t. dims
if (!internal::check_subsys_match_dims(target, dims))
throw exception::SubsysMismatchDims("qpp::measure()");
// check valid state and matching dimensions
if (internal::check_cvector(rA)) {
if (!internal::check_dims_match_cvect(dims, rA))
throw exception::DimsMismatchCvector("qpp::measure()");
} else if (internal::check_square_mat(rA)) {
if (!internal::check_dims_match_mat(dims, rA))
throw exception::DimsMismatchMatrix("qpp::measure()");
} else
throw exception::MatrixNotSquareNorCvector("qpp::measure()");
std::vector<idx> subsys_dims(target.size());
for (idx i = 0; i < target.size(); ++i)
subsys_dims[i] = dims[target[i]];
idx D = prod(std::begin(dims), std::end(dims));
idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
idx Dsubsys_bar = D / Dsubsys;
// check the Kraus operators
if (Ks.empty())
throw exception::ZeroSize("qpp::measure()");
if (!internal::check_square_mat(Ks[0]))
throw exception::MatrixNotSquare("qpp::measure()");
if (Dsubsys != static_cast<idx>(Ks[0].rows()))
throw exception::DimsMismatchMatrix("qpp::measure()");
for (auto&& elem : Ks)
if (elem.rows() != Ks[0].rows() || elem.cols() != Ks[0].rows())
throw exception::DimsNotEqual("qpp::measure()");
// END EXCEPTION CHECKS
// probabilities
std::vector<double> prob(Ks.size());
// resulting states
std::vector<cmat> outstates;
if (destructive)
outstates.resize(Ks.size(), cmat::Zero(Dsubsys_bar, Dsubsys_bar));
else
outstates.resize(Ks.size(), cmat::Zero(D, D));
//************ ket ************//
if (internal::check_cvector(rA)) // column vector
{
for (idx i = 0; i < Ks.size(); ++i) {
ket tmp = apply(rA, Ks[i], target, dims);
prob[i] = std::pow(norm(tmp), 2);
if (prob[i] > 0) {
// normalized output state
// corresponding to measurement result i
tmp /= std::sqrt(prob[i]);
if (destructive)
outstates[i] = ptrace(tmp, target, dims);
else
outstates[i] = tmp;
}
}
}
//************ density matrix ************//
else // square matrix
{
for (idx i = 0; i < Ks.size(); ++i) {
cmat tmp = apply(rA, Ks[i], target, dims);
if (destructive)
tmp = ptrace(tmp, target, dims);
prob[i] = std::abs(trace(tmp)); // probability
if (prob[i] > 0) {
// normalized output state
// corresponding to measurement result i
outstates[i] = tmp / prob[i];
}
}
}
// sample from the probability distribution
std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
idx result = dd(gen);
return std::make_tuple(result, prob, outstates);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
const std::initializer_list<cmat>& Ks, const std::vector<idx>& target,
const std::vector<idx>& dims, bool destructive = true) {
return measure(A, std::vector<cmat>(Ks), target, dims, destructive);
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const std::vector<cmat>& Ks,
const std::vector<idx>& target, idx d = 2, bool destructive = true) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check valid dims
if (d < 2)
throw exception::DimsInvalid("qpp::measure()");
// END EXCEPTION CHECKS
idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
std::vector<idx> dims(n, d); // local dimensions vector
return measure(rA, Ks, target, dims, destructive);
}
// std::initializer_list overload, avoids ambiguity for 2-element lists, see
// http://stackoverflow.com
// /questions/26750039/ambiguity-when-using-initializer-list-as-parameter
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A using the set of Kraus operators \a Ks
* \see qpp::measure_seq()
*
* \note The dimension of all \a Ks must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param Ks Set of Kraus operators
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A,
const std::initializer_list<cmat>& Ks, const std::vector<idx>& target,
idx d = 2, bool destructive = true) {
return measure(A, std::vector<cmat>(Ks), target, d, destructive);
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A in the orthonormal basis or rank-1 projectors specified
* by the columns of the matrix \a V
* \see qpp::measure_seq()
*
* \note The dimension of \a V must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param V Matrix whose columns represent the measurement basis vectors or the
* bra parts of the rank-1 projectors
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& V,
const std::vector<idx>& target, const std::vector<idx>& dims,
bool destructive = true) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check that dimension is valid
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::measure()");
// check that target is valid w.r.t. dims
if (!internal::check_subsys_match_dims(target, dims))
throw exception::SubsysMismatchDims("qpp::measure()");
// check valid state and matching dimensions
if (internal::check_cvector(rA)) {
if (!internal::check_dims_match_cvect(dims, rA))
throw exception::DimsMismatchCvector("qpp::measure()");
} else if (internal::check_square_mat(rA)) {
if (!internal::check_dims_match_mat(dims, rA))
throw exception::DimsMismatchMatrix("qpp::measure()");
} else
throw exception::MatrixNotSquareNorCvector("qpp::measure()");
std::vector<idx> subsys_dims(target.size());
for (idx i = 0; i < target.size(); ++i)
subsys_dims[i] = dims[target[i]];
idx Dsubsys = prod(std::begin(subsys_dims), std::end(subsys_dims));
// check the matrix V
if (!internal::check_nonzero_size(V))
throw exception::ZeroSize("qpp::measure()");
if (Dsubsys != static_cast<idx>(V.rows()))
throw exception::DimsMismatchMatrix("qpp::measure()");
// END EXCEPTION CHECKS
// number of basis elements or number of rank-1 projectors
idx M = static_cast<idx>(V.cols());
//************ ket ************//
if (internal::check_cvector(rA)) {
const ket& rpsi = A.derived();
std::vector<double> prob(M); // probabilities
std::vector<cmat> outstates(M); // resulting states
#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
for (idx i = 0; i < M; ++i) {
if (destructive)
outstates[i] =
ip(static_cast<const ket&>(V.col(i)), rpsi, target, dims);
else
outstates[i] = apply(rpsi, prj(V.col(i)), target, dims);
}
for (idx i = 0; i < M; ++i) {
double tmp = norm(outstates[i]);
prob[i] = tmp * tmp;
if (prob[i] > 0) {
// normalized output state
// corresponding to measurement result m
outstates[i] /= tmp;
}
}
// sample from the probability distribution
std::discrete_distribution<idx> dd(std::begin(prob), std::end(prob));
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
idx result = dd(gen);
return std::make_tuple(result, prob, outstates);
}
//************ density matrix ************//
else {
std::vector<cmat> Ks(M);
for (idx i = 0; i < M; ++i)
Ks[i] = V.col(i) * adjoint(V.col(i));
return measure(rA, Ks, target, dims, destructive);
}
}
/**
* \brief Measures the part \a target of the multi-partite state vector or
* density matrix \a A in the orthonormal basis or rank-1 projectors specified
* by the columns of the matrix \a V
* \see qpp::measure_seq()
*
* \note The dimension of \a V must match the dimension of \a target. If
* \a destructive is set to true (by default), the measurement is destructive,
* i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param V Matrix whose columns represent the measurement basis vectors or the
* bra parts of the rank-1 projectors
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Result of the measurement, 2. Vector of outcome
* probabilities, and 3. Vector of post-measurement normalized states
*/
template <typename Derived>
std::tuple<idx, std::vector<double>, std::vector<cmat>>
measure(const Eigen::MatrixBase<Derived>& A, const cmat& V,
const std::vector<idx>& target, idx d = 2, bool destructive = true) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure()");
// check valid dims
if (d < 2)
throw exception::DimsInvalid("qpp::measure()");
// END EXCEPTION CHECKS
idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
std::vector<idx> dims(n, d); // local dimensions vector
return measure(rA, V, target, dims, destructive);
}
/**
* \brief Sequentially measures the part \a target of the multi-partite state
* vector or density matrix \a A in the computational basis
* \see qpp::measure()
*
* \note If \a destructive is set to true (by default), the measurement is
* destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param target Subsystem indexes that are measured
* \param dims Dimensions of the multi-partite system
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Vector of outcome results of the
* measurement (ordered in increasing order with respect to \a target, i.e.
* first measurement result corresponds to the subsystem with the smallest
* index), 2. Outcome probability, and 3. Post-measurement normalized state
*/
template <typename Derived>
std::tuple<std::vector<idx>, double, cmat>
measure_seq(const Eigen::MatrixBase<Derived>& A, std::vector<idx> target,
std::vector<idx> dims, bool destructive = true) {
// typename std::remove_const<
// typename Eigen::MatrixBase<Derived>::EvalReturnType
// >::type rA = A.derived();
dyn_mat<typename Derived::Scalar> rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure_seq()");
// check that dimension is valid
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::measure_seq()");
// check valid state and matching dimensions
if (internal::check_cvector(rA)) {
if (!internal::check_dims_match_cvect(dims, rA))
throw exception::DimsMismatchCvector("qpp::measure_seq()");
} else if (internal::check_square_mat(rA)) {
if (!internal::check_dims_match_mat(dims, rA))
throw exception::DimsMismatchMatrix("qpp::measure_seq()");
} else
throw exception::MatrixNotSquareNorCvector("qpp::measure_seq()");
// check that target is valid w.r.t. dims
if (!internal::check_subsys_match_dims(target, dims))
throw exception::SubsysMismatchDims("qpp::measure_seq()");
// END EXCEPTION CHECKS
std::vector<idx> result;
double prob = 1;
// sort target in decreasing order,
// the order of measurements does not matter
std::sort(std::begin(target), std::end(target), std::greater<idx>{});
//************ density matrix or column vector ************//
while (target.size() > 0) {
auto tmp = measure(rA, Gates::get_instance().Id(dims[target[0]]),
{target[0]}, dims, destructive);
result.emplace_back(std::get<0>(tmp));
prob *= std::get<1>(tmp)[std::get<0>(tmp)];
rA = std::get<2>(tmp)[std::get<0>(tmp)];
if (destructive) {
// remove the subsystem
dims.erase(std::next(std::begin(dims), target[0]));
}
target.erase(std::begin(target));
}
// order result in increasing order with respect to target
std::reverse(std::begin(result), std::end(result));
return std::make_tuple(result, prob, rA);
}
/**
* \brief Sequentially measures the part \a target of the multi-partite state
* vector or density matrix \a A in the computational basis
* \see qpp::measure()
*
* \note If \a destructive is set to true (by default), the measurement is
* destructive, i.e. the measured subsystems are traced away.
*
* \param A Eigen expression
* \param target Subsystem indexes that are measured
* \param d Subsystem dimensions
* \param destructive Destructive measurement, true by default
* \return Tuple of: 1. Vector of outcome results of the
* measurement (ordered in increasing order with respect to \a target, i.e.
* first measurement result corresponds to the subsystem with the smallest
* index), 2. Outcome probability, and 3. Post-measurement normalized state
*/
template <typename Derived>
std::tuple<std::vector<idx>, double, cmat>
measure_seq(const Eigen::MatrixBase<Derived>& A, std::vector<idx> target,
idx d = 2, bool destructive = true) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::measure_seq()");
// check valid dims
if (d < 2)
throw exception::DimsInvalid("qpp::measure_seq()");
// END EXCEPTION CHECKS
idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
std::vector<idx> dims(n, d); // local dimensions vector
return measure_seq(rA, target, dims, destructive);
}
/**
* \brief Resets qudits from the multi-partite state vector or density matrix
* \a A by performing a non-destructive measurement in the computational basis
* on the \a target qudits and discarding the measurement results, followed by
* shifting them back to the \f$|0\cdots 0\rangle\f$ state
*
* \param A Eigen expression
* \param target Target qudit indexes that are reset
* \param dims Dimensions of the multi-partite system
* \return Reset quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> reset(const Eigen::MatrixBase<Derived>& A,
std::vector<idx> target,
std::vector<idx> dims) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::reset()");
// check that dimension is valid
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::reset()");
// check valid state and matching dimensions
if (internal::check_cvector(rA)) {
if (!internal::check_dims_match_cvect(dims, rA))
throw exception::DimsMismatchCvector("qpp::reset()");
} else if (internal::check_square_mat(rA)) {
if (!internal::check_dims_match_mat(dims, rA))
throw exception::DimsMismatchMatrix("qpp::reset()");
} else
throw exception::MatrixNotSquareNorCvector("qpp::reset()");
// check that target is valid w.r.t. dims
if (!internal::check_subsys_match_dims(target, dims))
throw exception::SubsysMismatchDims("qpp::reset()");
// END EXCEPTION CHECKS
dyn_mat<typename Derived::Scalar> result;
std::vector<idx> resZ;
std::tie(resZ, std::ignore, result) = measure_seq(rA, target, dims, false);
for (idx i = 0; i < target.size(); ++i) {
cmat correction =
powm(Gates::get_instance().Xd(dims[i]), dims[i] - resZ[i]);
result = apply(result, correction, {target[i]}, dims);
}
return result;
}
/**
* \brief Resets qudits from the multi-partite state vector or density matrix
* \a A by performing a non-destructive measurement in the computational basis
* on the \a target qudits and discarding the measurement results, followed by
* shifting them back to the \f$|0\cdots 0\rangle\f$ state
*
* \param A Eigen expression
* \param target Target qudit indexes that are reset
* \param d Subsystem dimensions
* \return Reset quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> reset(const Eigen::MatrixBase<Derived>& A,
std::vector<idx> target, idx d = 2) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::reset()");
// check valid dims
if (d < 2)
throw exception::DimsInvalid("qpp::reset()");
// END EXCEPTION CHECKS
idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
std::vector<idx> dims(n, d); // local dimensions vector
return reset(rA, target, dims);
}
/**
* \brief Discards qudits from the multi-partite state vector or density matrix
* \a A by performing a destructive measurement in the computational basis on
* the \a target qudits and discarding the measurement results
*
* \param A Eigen expression
* \param target Target qudit indexes that are discarded
* \param dims Dimensions of the multi-partite system
* \return Resulting quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> discard(const Eigen::MatrixBase<Derived>& A,
std::vector<idx> target,
std::vector<idx> dims) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::discard()");
// check that dimension is valid
if (!internal::check_dims(dims))
throw exception::DimsInvalid("qpp::discard()");
// check valid state and matching dimensions
if (internal::check_cvector(rA)) {
if (!internal::check_dims_match_cvect(dims, rA))
throw exception::DimsMismatchCvector("qpp::discard()");
} else if (internal::check_square_mat(rA)) {
if (!internal::check_dims_match_mat(dims, rA))
throw exception::DimsMismatchMatrix("qpp::discard()");
} else
throw exception::MatrixNotSquareNorCvector("qpp::discard()");
// check that target is valid w.r.t. dims
if (!internal::check_subsys_match_dims(target, dims))
throw exception::SubsysMismatchDims("qpp::discard()");
// END EXCEPTION CHECKS
dyn_mat<typename Derived::Scalar> result;
std::tie(std::ignore, std::ignore, result) = measure_seq(rA, target, dims);
return result;
}
/**
* \brief Discards qudits from the multi-partite state vector or density matrix
* \a A by performing a destructive measurement in the computational basis on
* the \a target qudits and discarding the measurement results
*
* \param A Eigen expression
* \param target Target qudit indexes that are discarded
* \param d Subsystem dimensions
* \return Resulting quantum state
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> discard(const Eigen::MatrixBase<Derived>& A,
std::vector<idx> target, idx d = 2) {
const typename Eigen::MatrixBase<Derived>::EvalReturnType& rA = A.derived();
// EXCEPTION CHECKS
// check zero size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::discard()");
// check valid dims
if (d < 2)
throw exception::DimsInvalid("qpp::discard()");
// END EXCEPTION CHECKS
idx n = internal::get_num_subsys(static_cast<idx>(rA.rows()), d);
std::vector<idx> dims(n, d); // local dimensions vector
return discard(rA, target, dims);
}
} /* namespace qpp */
#endif /* INSTRUMENTS_H_ */
|
sample.c | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <dirent.h>
#include <string.h>
#include <sndfile.h>
#include <x86intrin.h>
#include <stdbool.h>
#include "sample.h"
#include "global.h"
#include "controls.h"
#include "conftuning.h"
#include "rclowpass.h"
#include "mem.h"
// ----------------------------------------------------------------------------
// sample_interp
// ----------------------------------------------------------------------------
inline void sample_interp(Sample * sample, double idx, __m128d * LR)
{
__m128d a, b;
int i = (int)idx;
double mu = idx - (double)i;
a[0] = (double)sample->data[2 * i];
a[1] = (double)sample->data[2 * i + 1];
b[0] = (double)sample->data[2 * i + 2];
b[1] = (double)sample->data[2 * i + 3];
*LR = (a + mu * (b - a));
}
// Used for both initialization and freeing data.
static void _sstore_init(int freeMem)
{
int key, layer, var;
Sample *sample;
for (key = 0; key < 128; ++key) {
_sStore.numLayers[key] = 0;
for (layer = 0; layer < MAX_LAYERS; ++layer) {
_sStore.numSamples[key][layer] = 0;
_sStore.rrIdx[key][layer] = 0;
for (var = 0; var < MAX_VARS; ++var) {
sample = &(_sStore.sample[key][layer][var]);
sample->owner = 0;
sample->len = 0;
sample->idx0 = 0;
sample->rms = 0;
sample->speed = 1;
if (freeMem && sample->data != NULL && sample->owner) {
free(sample->data);
}
sample->data = NULL;
}
}
}
}
// ----------------------------------------------------------------------------
// sstore_init
// ----------------------------------------------------------------------------
void sstore_init()
{
_sstore_init(0);
}
// ----------------------------------------------------------------------------
// sstore_load
// ----------------------------------------------------------------------------
static int _parse_sample_filename(char *name, int *key, int *layer, int *var)
{
if (sscanf(name, "on-%d-%d-%d", key, layer, var) != 3) {
return 1;
}
if (*key < 0 || *key > 127 ||
*layer < 1 || *layer > MAX_LAYERS || *var < 1 || *var > MAX_VARS) {
return 1;
}
*layer -= 1;
*var -= 1;
return 0;
}
static void _load_sample(Sample * s, char *fn, double st)
{
if (s->data != NULL) {
printf("Attempt to load already loaded sample.\n");
exit(1);
}
SF_INFO fileInfo;
fileInfo.format = 0;
fileInfo.channels = 2;
SNDFILE *sndFile = sf_open(fn, SFM_READ, &fileInfo);
if (fileInfo.channels != 2) {
printf("Samples must be stereo files.\n");
return;
}
if (fileInfo.samplerate != 48000) {
printf("Samples must be 48 kHz.\n");
return;
}
s->owner = 1;
s->len = fileInfo.frames;
s->idx0 = 0;
s->rms = 1.0;
s->speed = pow(2.0, st / 12.0);
// We over-allocate our array by one sample for each channel.
// This makes our interpolation code simpler, as we can rely on having an
// extra zero sample beyond the final one.
s->data = malloc_exit(2 * (s->len + 1) * sizeof(int16_t));
int count = sf_read_short(sndFile, s->data, 2 * fileInfo.frames);
if (count != 2 * fileInfo.frames) {
printf("Failed to read all samples for file: %s\n", fn);
printf(" %i != %i\n", count, 2 * (int)fileInfo.frames);
exit(1);
}
if (sf_close(sndFile) != 0) {
printf("Failed to close file: %s\n", fn);
return;
}
// Zero out the additional left/right samples.
s->data[2 * s->len] = 0;
s->data[2 * s->len + 1] = 0;
}
void sstore_load()
{
DIR *dir;
struct dirent *entry;
int key, layer, var, stop;
double tuning;
dir = opendir(".");
if (dir == NULL) {
printf("Failed to open samples directory.\n");
exit(1);
}
stop = 0;
#pragma omp parallel private(entry, key, layer, var, tuning)
while (!stop) {
#pragma omp critical
{
entry = readdir(dir);
if (entry == NULL) {
stop = 1;
// This silences a warning about an uninitialized variable.
tuning = 0;
} else {
tuning = conftuning_semitones(entry->d_name);
}
}
if (stop) {
continue;
}
// Skip non-regular files and directories.
if (entry->d_type != DT_REG) {
continue;
}
// Skip incorrectly named files.
if (_parse_sample_filename(entry->d_name, &key, &layer, &var) != 0) {
printf("Failed to parse filename: %s\n", entry->d_name);
continue;
}
if (layer >= _sStore.numLayers[key]) {
_sStore.numLayers[key] = layer + 1;
}
if (var >= _sStore.numSamples[key][layer]) {
_sStore.numSamples[key][layer] = var + 1;
}
// Load sample with tuning information.
_load_sample(&(_sStore.sample[key][layer][var]), entry->d_name,
tuning);
}
closedir(dir);
}
// ----------------------------------------------------------------------------
// sstore_free_data
// ----------------------------------------------------------------------------
void sstore_free_data()
{
_sstore_init(1);
}
// ----------------------------------------------------------------------------
// sstore_crop
// ----------------------------------------------------------------------------
static void _crop_sample(Sample * sample, int th)
{
int i;
for (i = 0; i < sample->len; ++i) {
if (sample->data[2 * i] >= th ||
sample->data[2 * i] <= -th ||
sample->data[2 * i + 1] >= th || sample->data[2 * i + 1] <= -th) {
break;
}
}
sample->idx0 = i;
}
void sstore_crop(double thF)
{
int th = (int)(thF / INT16_SCALE);
int key, layer, var;
#pragma omp parallel for private(key, layer, var) schedule(dynamic)
for (key = 0; key < 128; ++key) {
for (layer = 0; layer < _sStore.numLayers[key]; ++layer) {
for (var = 0; var < _sStore.numSamples[key][layer]; ++var) {
_crop_sample(&(_sStore.sample[key][layer][var]), th);
}
}
}
}
// ----------------------------------------------------------------------------
// sstore_compute_rms
// ----------------------------------------------------------------------------
void _compute_sample_rms(Sample * sample, int di)
{
int iMax = sample->idx0 + di;
if (iMax > sample->len) {
iMax = sample->len;
}
iMax *= 2;
double x;
double rms = 0;
double count = 0;
int i;
for (i = sample->idx0; i < iMax; ++i) {
++count;
x = (double)(sample->data[i] * INT16_SCALE);
rms += x * x;
}
rms /= count;
rms = sqrt(rms);
sample->rms = rms;
}
void sstore_compute_rms(double dt)
{
int key, layer, var;
int di = (int)(dt * SAMPLE_RATE);
#pragma omp parallel for private(key, layer, var) schedule(dynamic)
for (key = 0; key < 128; ++key) {
for (layer = 0; layer < _sStore.numLayers[key]; ++layer) {
for (var = 0; var < _sStore.numSamples[key][layer]; ++var) {
_compute_sample_rms(&(_sStore.sample[key][layer][var]), di);
}
}
}
}
// ----------------------------------------------------------------------------
// sstore_fill_samples
// ----------------------------------------------------------------------------
// Return true if a sample was coppied.
static bool _copy_samples(int toKey, int fromKey, bool owned) {
if(toKey < 0 || toKey > 127 || fromKey < 0 ||
fromKey > 127 || toKey == fromKey) {
return false;
}
// If fromKey doesn't have any layers, we can't do anything.
if(!_sStore.numLayers[fromKey]) {
return false;
}
// toKey should have the same number of layers. If it has 0, we'll do
// a full copy.
if(_sStore.numLayers[toKey] == 0) {
_sStore.numLayers[toKey] = _sStore.numLayers[fromKey];
}
// The number of layers must be the same - this is true when doing
// borrowing.
if(_sStore.numLayers[toKey] != _sStore.numLayers[fromKey]) {
return false;
}
bool coppied = false;
for(int layer = 0; layer < _sStore.numLayers[toKey]; ++layer) {
for(int var = 0; var < _sStore.numSamples[fromKey][layer]; ++var) {
Sample *fromSample = &(_sStore.sample[fromKey][layer][var]);
// If we're doing borrowing for round-robbin, we only want owned
// samples.
if(owned && !fromSample->owner) {
continue;
}
int toVar = _sStore.numSamples[toKey][layer];
Sample *toSample = &(_sStore.sample[toKey][layer][toVar]);
*toSample = *fromSample;
toSample->owner = false;
toSample->speed =
fromSample->speed * pow(2, (double)(toKey - fromKey) / 12);
_sStore.numSamples[toKey][layer]++;
coppied = true;
}
}
return coppied;
}
void sstore_fill_samples()
{
for(int i = 1; i < 127; ++i) {
// Fill from lower key.
int fromKey = 127;
while(--fromKey) {
int toKey = fromKey + 1;
if(_sStore.numLayers[toKey] == 0 &&
_sStore.numLayers[fromKey] != 0) {
_copy_samples(toKey, fromKey, false);
}
}
// Fill from upper key.
for(int fromKey = 0; fromKey < 127; ++fromKey) {
int toKey = fromKey - 1;
if(_sStore.numLayers[toKey] == 0 &&
_sStore.numLayers[fromKey] != 0) {
_copy_samples(toKey, fromKey, false);
}
}
}
}
// ----------------------------------------------------------------------------
// sstore_borrow_samples
// ----------------------------------------------------------------------------
void sstore_borrow_samples(int maxDist)
{
for(int dist = 1; dist < maxDist + 1; ++dist) {
for(int toKey = 0; toKey < 128; ++toKey) {
if(_sStore.numLayers[toKey] == 0) {
continue;
}
_copy_samples(toKey, toKey - dist, true);
_copy_samples(toKey, toKey + dist, true);
}
}
}
// ----------------------------------------------------------------------------
// sstore_get_samples
// ----------------------------------------------------------------------------
void _update_rrIdx(int key, int layer) {
int rrIdx = _sStore.rrIdx[key][layer] + 1;
_sStore.rrIdx[key][layer] = rrIdx % _sStore.numSamples[key][layer];
}
// Returns sample 1 mix amplification.
double sstore_get_samples(int key, double vel, Sample **s1, Sample **s2)
{
*s1 = *s2 = NULL;
bool mixLayers = ctrls_value(CTRL_MIX_LAYERS) > 0.5;
int numLayers = _sStore.numLayers[key];
if(numLayers == 0) {
return 0;
}
// If we're mixing layers, then the maximum value is decreased by 1.
if(mixLayers) {
numLayers -= 1;
}
// Scale the velocity to find the appropriate layer.
vel = pow(vel, ctrls_value(CTRL_GAMMA_LAYER));
double layer = ((double)numLayers * vel);
// This is the lower layer.
int layer0 = (int)layer;
if(layer0 == _sStore.numLayers[key]) {
--layer0;
}
_update_rrIdx(key, layer0);
*s1 = &(_sStore.sample[key][layer0][_sStore.rrIdx[key][layer0]]);
// If not mixing layers, we're done.
if(!mixLayers) {
return 1;
}
// If we're already at the top-most layer, we're done.
if(layer0 == numLayers) {
return 1;
}
// If the number of samples doesn't match, we don't mix.
if(_sStore.numSamples[key][layer0] !=
_sStore.numSamples[key][layer0 + 1]) {
return 1;
}
*s2 = &(_sStore.sample[key][layer0 + 1][_sStore.rrIdx[key][layer0]]);
return 1 - (layer - (double)layer0);
}
// ----------------------------------------------------------------------------
// sstore_fake_rc_layer
// ----------------------------------------------------------------------------
static void _filter_sample(Sample *s, int order) {
int16_t * newData = malloc_exit((2 * s->len + 2) * sizeof(int16_t));
for(int i = 0; i < 2*s->len + 2; ++i) {
newData[i] = s->data[i];
}
rcLowPass(newData, s->len, 10, order);
s->data = newData;
s->owner = true;
}
void sstore_fake_rc_layer(int order)
{
for(int key = 0; key < 128; ++key) {
if(_sStore.numLayers[key] != 1 || _sStore.numSamples[key][0] == 0) {
continue;
}
_sStore.numLayers[key] = 2;
// Copy samples to layer 2.
_sStore.numSamples[key][1] = _sStore.numSamples[key][0];
for(int var = 0; var < _sStore.numSamples[key][0]; ++var) {
Sample *s0 = &(_sStore.sample[key][0][var]);
Sample *s1 = &(_sStore.sample[key][1][var]);
*s1 = *s0;
_filter_sample(s0, order);
}
}
}
|
wip_tti_kernel_so4.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r34)[y_size + 1][z_size + 1];
posix_memalign((void **)&r34, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r35)[y_size + 1][z_size + 1];
posix_memalign((void **)&r35, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 4;
int t_blk_size = 2 * sf * (time_M - time_m);
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = time_m, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3); time <= time_M; time += 1, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M - (x_M - x_m + 2) % (x0_blk0_size), x_m - 1, y0_blk0_size, y_M - (y_M - y_m + 2) % (y0_blk0_size), y_m - 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M - (x_M - x_m + 2) % (x0_blk0_size), x_m - 1, (y_M - y_m + 2) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 2) % (y0_blk0_size) + 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, (x_M - x_m + 2) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 2) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 2) % (y0_blk0_size), y_m - 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, (x_M - x_m + 2) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 2) % (x0_blk0_size) + 1, (y_M - y_m + 2) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 2) % (y0_blk0_size) + 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
/*==============================================*/
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x1_blk0_size, x_M - (x_M - x_m + 1) % (x1_blk0_size), x_m, y1_blk0_size, y_M - (y_M - y_m + 1) % (y1_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x1_blk0_size, x_M - (x_M - x_m + 1) % (x1_blk0_size), x_m, (y_M - y_m + 1) % (y1_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y1_blk0_size) + 1, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, (x_M - x_m + 1) % (x1_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x1_blk0_size) + 1, y1_blk0_size, y_M - (y_M - y_m + 1) % (y1_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, (x_M - x_m + 1) % (x1_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x1_blk0_size) + 1, (y_M - y_m + 1) % (y1_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y1_blk0_size) + 1, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
/*
for (int time = time_m, t1 = (time + 1) % (3); time <= time_M; time += 1, t1 = (time + 1) % (3))
{
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
Begin section2
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F / 3.0F) * (x_M - x_m + 1) / nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic, chunk_size)
for (int x = x_m; x <= x_M; x += 1)
{
for (int y = y_m; y <= y_M; y += 1)
{
}
}
}
End section2
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec - start_section2.tv_sec) + (double)(end_section2.tv_usec - start_section2.tv_usec) / 1000000;
}
*/
free(r21);
free(r20);
free(r19);
free(r18);
free(r17);
free(r34);
free(r35);
return 0;
}
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
printf(" Change of inner x0_blk0 %d \n", x0_blk0);
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r39 = -v[t0][x - time + 4][y - time + 4][z + 4];
r35[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r39 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
float r40 = -u[t0][x - time + 4][y - time + 4][z + 4];
r34[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r40 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
}
}
}
}
}
}
}
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
if (x1_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x1_blk0 = max((x_m + time), xb - 2); x1_blk0 <= + min((x_M + time), (xb - 2 + xb_size)); x1_blk0 += x1_blk0_size)
{
printf(" Change of inner x1_blk0 %d \n", x1_blk0);
for (int y1_blk0 = max((y_m + time), yb - 2); y1_blk0 <= + min((y_M + time), (yb - 2 + yb_size)); y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= min(min((x_M + time), (xb - 2 + xb_size - 1)), (x1_blk0 + x1_blk0_size - 1)); x++)
{
for (int y = y1_blk0; y <= min(min((y_M + time), (yb - 2 + yb_size - 1)), (y1_blk0 + y1_blk0_size - 1)); y++)
{
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
float r44 = r18[x - time + 1][y - time + 1][z] * r35[x - time + 1][y - time + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r19[x - time + 1][y][z + 1] * r20[x - time + 1][y][z + 1] * r35[x - time + 1][y][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r20[x][y - time + 1][z + 1] * r21[x][y - time + 1][z + 1] * r35[x][y - time + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1];
float r43 = pow(vp[x - time + 4][y - time + 4][z + 4], -2);
float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[x - time + 1][y - time + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r19[x - time + 1][y][z + 1] * r20[x - time + 1][y][z + 1] * r34[x - time + 1][y][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r20[x][y - time + 1][z + 1] * r21[x][y - time + 1][z + 1] * r34[x][y - time + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t2][x - time + 4][y - time + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t2][x - time + 4][y - time + 4][z + 4]);
u[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
v[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
}
int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x][y][sp_zi];
float r22 = save_src_u[tw][source_id[x][y][zind]] * source_mask[x][y][zind];
#pragma omp atomic update
u[t1][x - time + 4][y - time + 4][zind + 4] += r22;
float r23 = save_src_v[tw][source_id[x][y][zind]] * source_mask[x][y][zind];
#pragma omp atomic update
v[t1][x - time + 4][y - time + 4][zind + 4] += r23;
printf(" Time %d , at : %d, %d \n", tw, x - time + 4, zind + 4);
}
}
}
}
}
}
}
/* Backdoor edit at Wed Sep 9 19:03:00 2020*/
/* Backdoor edit at Thu Sep 10 14:26:00 2020*/
/* Backdoor edit at Thu Sep 10 14:27:06 2020*/
/* Backdoor edit at Thu Sep 10 14:29:42 2020*/
|
mandel-omp.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (diplay window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
#pragma omp parallel for collapse(2) schedule(runtime)
for (row = 0; row < height; ++row) {
for (col = 0; col < width; ++col) {
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
//#pragma omp flush
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
jacld.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB LU code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "applu.incl"
//---------------------------------------------------------------------
// compute the lower triangular part of the jacobian matrix
//---------------------------------------------------------------------
void jacld(int k)
{
//---------------------------------------------------------------------
// local variables
//---------------------------------------------------------------------
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
#pragma omp for schedule(static) nowait
for (j = jst; j < jend; j++) {
for (i = ist; i < iend; i++) {
//---------------------------------------------------------------------
// form the block daigonal
//---------------------------------------------------------------------
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[j][i][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 );
d[j][i][1][0] = 0.0;
d[j][i][2][0] = 0.0;
d[j][i][3][0] = 0.0;
d[j][i][4][0] = 0.0;
d[j][i][0][1] = -dt * 2.0
* ( tx1 * r43 + ty1 + tz1 ) * c34 * tmp2 * u[k][j][i][1];
d[j][i][1][1] = 1.0
+ dt * 2.0 * c34 * tmp1 * ( tx1 * r43 + ty1 + tz1 )
+ dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 );
d[j][i][2][1] = 0.0;
d[j][i][3][1] = 0.0;
d[j][i][4][1] = 0.0;
d[j][i][0][2] = -dt * 2.0
* ( tx1 + ty1 * r43 + tz1 ) * c34 * tmp2 * u[k][j][i][2];
d[j][i][1][2] = 0.0;
d[j][i][2][2] = 1.0
+ dt * 2.0 * c34 * tmp1 * ( tx1 + ty1 * r43 + tz1 )
+ dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 );
d[j][i][3][2] = 0.0;
d[j][i][4][2] = 0.0;
d[j][i][0][3] = -dt * 2.0
* ( tx1 + ty1 + tz1 * r43 ) * c34 * tmp2 * u[k][j][i][3];
d[j][i][1][3] = 0.0;
d[j][i][2][3] = 0.0;
d[j][i][3][3] = 1.0
+ dt * 2.0 * c34 * tmp1 * ( tx1 + ty1 + tz1 * r43 )
+ dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 );
d[j][i][4][3] = 0.0;
d[j][i][0][4] = -dt * 2.0
* ( ( ( tx1 * ( r43*c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( c34 - c1345 ) ) * ( u[k][j][i][1]*u[k][j][i][1] )
+ ( tx1 * ( c34 - c1345 )
+ ty1 * ( r43*c34 - c1345 )
+ tz1 * ( c34 - c1345 ) ) * ( u[k][j][i][2]*u[k][j][i][2] )
+ ( tx1 * ( c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( r43*c34 - c1345 ) ) * (u[k][j][i][3]*u[k][j][i][3])
) * tmp3
+ ( tx1 + ty1 + tz1 ) * c1345 * tmp2 * u[k][j][i][4] );
d[j][i][1][4] = dt * 2.0 * tmp2 * u[k][j][i][1]
* ( tx1 * ( r43*c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( c34 - c1345 ) );
d[j][i][2][4] = dt * 2.0 * tmp2 * u[k][j][i][2]
* ( tx1 * ( c34 - c1345 )
+ ty1 * ( r43*c34 -c1345 )
+ tz1 * ( c34 - c1345 ) );
d[j][i][3][4] = dt * 2.0 * tmp2 * u[k][j][i][3]
* ( tx1 * ( c34 - c1345 )
+ ty1 * ( c34 - c1345 )
+ tz1 * ( r43*c34 - c1345 ) );
d[j][i][4][4] = 1.0
+ dt * 2.0 * ( tx1 + ty1 + tz1 ) * c1345 * tmp1
+ dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 );
//---------------------------------------------------------------------
// form the first block sub-diagonal
//---------------------------------------------------------------------
tmp1 = rho_i[k-1][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[j][i][0][0] = - dt * tz1 * dz1;
a[j][i][1][0] = 0.0;
a[j][i][2][0] = 0.0;
a[j][i][3][0] = - dt * tz2;
a[j][i][4][0] = 0.0;
a[j][i][0][1] = - dt * tz2
* ( - ( u[k-1][j][i][1]*u[k-1][j][i][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[k-1][j][i][1] );
a[j][i][1][1] = - dt * tz2 * ( u[k-1][j][i][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2;
a[j][i][2][1] = 0.0;
a[j][i][3][1] = - dt * tz2 * ( u[k-1][j][i][1] * tmp1 );
a[j][i][4][1] = 0.0;
a[j][i][0][2] = - dt * tz2
* ( - ( u[k-1][j][i][2]*u[k-1][j][i][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[k-1][j][i][2] );
a[j][i][1][2] = 0.0;
a[j][i][2][2] = - dt * tz2 * ( u[k-1][j][i][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[j][i][3][2] = - dt * tz2 * ( u[k-1][j][i][2] * tmp1 );
a[j][i][4][2] = 0.0;
a[j][i][0][3] = - dt * tz2
* ( - ( u[k-1][j][i][3] * tmp1 ) * ( u[k-1][j][i][3] * tmp1 )
+ C2 * qs[k-1][j][i] * tmp1 )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[k-1][j][i][3] );
a[j][i][1][3] = - dt * tz2
* ( - C2 * ( u[k-1][j][i][1] * tmp1 ) );
a[j][i][2][3] = - dt * tz2
* ( - C2 * ( u[k-1][j][i][2] * tmp1 ) );
a[j][i][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[k-1][j][i][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[j][i][4][3] = - dt * tz2 * C2;
a[j][i][0][4] = - dt * tz2
* ( ( C2 * 2.0 * qs[k-1][j][i] - C1 * u[k-1][j][i][4] )
* u[k-1][j][i][3] * tmp2 )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[k-1][j][i][1]*u[k-1][j][i][1])
- ( c34 - c1345 ) * tmp3 * (u[k-1][j][i][2]*u[k-1][j][i][2])
- ( r43*c34 - c1345 )* tmp3 * (u[k-1][j][i][3]*u[k-1][j][i][3])
- c1345 * tmp2 * u[k-1][j][i][4] );
a[j][i][1][4] = - dt * tz2
* ( - C2 * ( u[k-1][j][i][1]*u[k-1][j][i][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[k-1][j][i][1];
a[j][i][2][4] = - dt * tz2
* ( - C2 * ( u[k-1][j][i][2]*u[k-1][j][i][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[k-1][j][i][2];
a[j][i][3][4] = - dt * tz2
* ( C1 * ( u[k-1][j][i][4] * tmp1 )
- C2 * ( qs[k-1][j][i] * tmp1
+ u[k-1][j][i][3]*u[k-1][j][i][3] * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[k-1][j][i][3];
a[j][i][4][4] = - dt * tz2
* ( C1 * ( u[k-1][j][i][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
//---------------------------------------------------------------------
// form the second block sub-diagonal
//---------------------------------------------------------------------
tmp1 = rho_i[k][j-1][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[j][i][0][0] = - dt * ty1 * dy1;
b[j][i][1][0] = 0.0;
b[j][i][2][0] = - dt * ty2;
b[j][i][3][0] = 0.0;
b[j][i][4][0] = 0.0;
b[j][i][0][1] = - dt * ty2
* ( - ( u[k][j-1][i][1]*u[k][j-1][i][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[k][j-1][i][1] );
b[j][i][1][1] = - dt * ty2 * ( u[k][j-1][i][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[j][i][2][1] = - dt * ty2 * ( u[k][j-1][i][1] * tmp1 );
b[j][i][3][1] = 0.0;
b[j][i][4][1] = 0.0;
b[j][i][0][2] = - dt * ty2
* ( - ( u[k][j-1][i][2] * tmp1 ) * ( u[k][j-1][i][2] * tmp1 )
+ C2 * ( qs[k][j-1][i] * tmp1 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[k][j-1][i][2] );
b[j][i][1][2] = - dt * ty2
* ( - C2 * ( u[k][j-1][i][1] * tmp1 ) );
b[j][i][2][2] = - dt * ty2 * ( (2.0 - C2) * (u[k][j-1][i][2] * tmp1) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[j][i][3][2] = - dt * ty2 * ( - C2 * ( u[k][j-1][i][3] * tmp1 ) );
b[j][i][4][2] = - dt * ty2 * C2;
b[j][i][0][3] = - dt * ty2
* ( - ( u[k][j-1][i][2]*u[k][j-1][i][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[k][j-1][i][3] );
b[j][i][1][3] = 0.0;
b[j][i][2][3] = - dt * ty2 * ( u[k][j-1][i][3] * tmp1 );
b[j][i][3][3] = - dt * ty2 * ( u[k][j-1][i][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[j][i][4][3] = 0.0;
b[j][i][0][4] = - dt * ty2
* ( ( C2 * 2.0 * qs[k][j-1][i] - C1 * u[k][j-1][i][4] )
* ( u[k][j-1][i][2] * tmp2 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(u[k][j-1][i][1]*u[k][j-1][i][1])
- ( r43*c34 - c1345 )*tmp3*(u[k][j-1][i][2]*u[k][j-1][i][2])
- ( c34 - c1345 )*tmp3*(u[k][j-1][i][3]*u[k][j-1][i][3])
- c1345*tmp2*u[k][j-1][i][4] );
b[j][i][1][4] = - dt * ty2
* ( - C2 * ( u[k][j-1][i][1]*u[k][j-1][i][2] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[k][j-1][i][1];
b[j][i][2][4] = - dt * ty2
* ( C1 * ( u[k][j-1][i][4] * tmp1 )
- C2 * ( qs[k][j-1][i] * tmp1
+ u[k][j-1][i][2]*u[k][j-1][i][2] * tmp2 ) )
- dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[k][j-1][i][2];
b[j][i][3][4] = - dt * ty2
* ( - C2 * ( u[k][j-1][i][2]*u[k][j-1][i][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[k][j-1][i][3];
b[j][i][4][4] = - dt * ty2
* ( C1 * ( u[k][j-1][i][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
//---------------------------------------------------------------------
// form the third block sub-diagonal
//---------------------------------------------------------------------
tmp1 = rho_i[k][j][i-1];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[j][i][0][0] = - dt * tx1 * dx1;
c[j][i][1][0] = - dt * tx2;
c[j][i][2][0] = 0.0;
c[j][i][3][0] = 0.0;
c[j][i][4][0] = 0.0;
c[j][i][0][1] = - dt * tx2
* ( - ( u[k][j][i-1][1] * tmp1 ) * ( u[k][j][i-1][1] * tmp1 )
+ C2 * qs[k][j][i-1] * tmp1 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[k][j][i-1][1] );
c[j][i][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[k][j][i-1][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[j][i][2][1] = - dt * tx2
* ( - C2 * ( u[k][j][i-1][2] * tmp1 ) );
c[j][i][3][1] = - dt * tx2
* ( - C2 * ( u[k][j][i-1][3] * tmp1 ) );
c[j][i][4][1] = - dt * tx2 * C2;
c[j][i][0][2] = - dt * tx2
* ( - ( u[k][j][i-1][1] * u[k][j][i-1][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[k][j][i-1][2] );
c[j][i][1][2] = - dt * tx2 * ( u[k][j][i-1][2] * tmp1 );
c[j][i][2][2] = - dt * tx2 * ( u[k][j][i-1][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[j][i][3][2] = 0.0;
c[j][i][4][2] = 0.0;
c[j][i][0][3] = - dt * tx2
* ( - ( u[k][j][i-1][1]*u[k][j][i-1][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[k][j][i-1][3] );
c[j][i][1][3] = - dt * tx2 * ( u[k][j][i-1][3] * tmp1 );
c[j][i][2][3] = 0.0;
c[j][i][3][3] = - dt * tx2 * ( u[k][j][i-1][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx4;
c[j][i][4][3] = 0.0;
c[j][i][0][4] = - dt * tx2
* ( ( C2 * 2.0 * qs[k][j][i-1] - C1 * u[k][j][i-1][4] )
* u[k][j][i-1][1] * tmp2 )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( u[k][j][i-1][1]*u[k][j][i-1][1] )
- ( c34 - c1345 ) * tmp3 * ( u[k][j][i-1][2]*u[k][j][i-1][2] )
- ( c34 - c1345 ) * tmp3 * ( u[k][j][i-1][3]*u[k][j][i-1][3] )
- c1345 * tmp2 * u[k][j][i-1][4] );
c[j][i][1][4] = - dt * tx2
* ( C1 * ( u[k][j][i-1][4] * tmp1 )
- C2 * ( u[k][j][i-1][1]*u[k][j][i-1][1] * tmp2
+ qs[k][j][i-1] * tmp1 ) )
- dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[k][j][i-1][1];
c[j][i][2][4] = - dt * tx2
* ( - C2 * ( u[k][j][i-1][2]*u[k][j][i-1][1] ) * tmp2 )
- dt * tx1 * ( c34 - c1345 ) * tmp2 * u[k][j][i-1][2];
c[j][i][3][4] = - dt * tx2
* ( - C2 * ( u[k][j][i-1][3]*u[k][j][i-1][1] ) * tmp2 )
- dt * tx1 * ( c34 - c1345 ) * tmp2 * u[k][j][i-1][3];
c[j][i][4][4] = - dt * tx2
* ( C1 * ( u[k][j][i-1][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
|
GB_binop__isne_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int16)
// A*D function (colscale): GB (_AxD__isne_int16)
// D*A function (rowscale): GB (_DxB__isne_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int16)
// C=scalar+B GB (_bind1st__isne_int16)
// C=scalar+B' GB (_bind1st_tran__isne_int16)
// C=A+scalar GB (_bind2nd__isne_int16)
// C=A'+scalar GB (_bind2nd_tran__isne_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB005-indirectaccess1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program is extracted from a real application at LLNL.
Two pointers (xa1 and xa2) have a pair of values with a distance of 12.
They are used as start base addresses for two 1-D arrays.
Their index set has two indices with distance of 12: 999 +12 = 1011.
So there is loop carried dependence.
However, having loop carried dependence does not mean data races will always happen.
The iterations with loop carried dependence must be scheduled to
different threads in order for data races to happen.
In this example, we use schedule(static,1) to increase the chance that
the dependent loop iterations will be scheduled to different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
int indexSet[N] = {
521, 523, 525, 527, 529, 531,
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 923, // change original 921 to 923 = 911+12
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
int main (int argc, char* argv[])
{
// max index value is 2013. +12 to obtain a valid xa2[idx] after xa1+12.
// +1 to ensure a reference like base[2015] is within the bound.
double * base = (double*) malloc(sizeof(double)* (2013+12+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
double * xa1 = base;
double * xa2 = xa1 + 12;
int i;
// initialize segments touched by indexSet
#pragma omp parallel for
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
// default static even scheduling may not trigger data race, using static,1 instead.
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0 + i;
xa2[idx]+= 3.0 + i;
}
printf("x1[999]=%lf xa2[1285]=%lf\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
ktuple_pair.c | /* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*********************************************************************
* Clustal Omega - Multiple sequence alignment
*
* Copyright (C) 2010 University College Dublin
*
* Clustal-Omega is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is part of Clustal-Omega.
*
********************************************************************/
/*
* RCS $Id: ktuple_pair.c 230 2011-04-09 15:37:50Z andreas $
*
*
* K-Tuple code for pairwise alignment (Wilbur and Lipman, 1983; PMID
* 6572363). Most code taken from showpair.c (Clustal 1.83)
* DD: some functions now have lots of parameters as static variables
* were removed to make code OpenMP-friendly
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "squid/squid.h"
#include "util.h"
#include "symmatrix.h"
#include "ktuple_pair.h"
#include "log.h"
#include "progress.h"
#define END_MARK -3 /* see interface.c in 1.83 */
#define NUMRES 32 /* max size of comparison matrix */
/* see notes below */
#undef SORT_LAST_ELEMENT_AS_WELL
/* gap_pos1 = NUMRES-2; /@ code for gaps inserted by clustalw @/ */
static const int GAP_POS2 = NUMRES-1; /* code for gaps already in alignment */
static bool DNAFLAG = FALSE;
static const char *AMINO_ACID_CODES = "ABCDEFGHIKLMNPQRSTUVWXYZ-";
static const char *NUCLEIC_ACID_CODES = "ACGTUN-";
/* As far as I understand the gap symbol should not be necessary here,
* because we use isgap for testing later anyway. But changing this,
* will affect max_res_code and max_nuc as well. So I leave it for now
* as it is. AW
*/
static bool percent = TRUE;
static void make_ptrs(int *tptr, int *pl, const int naseq, const int l, const int ktup, const int max_res_code, char **seq_array);
static void put_frag(const int fs, const int v1, const int v2, const int flen, const int curr_frag, int *next, int *maxsf, int **accum);
static bool frag_rel_pos(int a1, int b1, int a2, int b2, int ktup);
static void des_quick_sort(int *array1, int *array2, const int array_size);
static void pair_align(int seq_no, int l1, int l2, int max_res_code, ktuple_param_t *aln_param,
char **seq_array, int *maxsf, int **accum, int max_aln_length,
int *zza, int *zzb, int *zzc, int *zzd);
static void encode(char *seq, char *naseq, int l, const char *res_codes);
static int res_index(const char *lookup, char c);
typedef struct {
int i1;
int i2;
} two_ints_t;
/* default ktuple pairwise alignment parameters
*
*/
/* protein
*/
/* designated initializer */
const ktuple_param_t default_protein_param = {
.ktup = 1,
.wind_gap = 3,
.signif = 5,
.window = 5,
};
/* dna
*/
/* designated initializer */
const ktuple_param_t default_dna_param = {
.ktup = 2,
.wind_gap = 5,
.signif = 4,
.window = 4,
};
/**
* note: naseq should be unit-offset
*/
static void
encode(char *seq, char *naseq, int l, const char *res_codes)
{
/* code seq as ints .. use GAP_POS2 for gap */
register int i;
bool seq_contains_unknown_char = FALSE;
/*LOG_DEBUG("seq=%s naseq=%p l=%d", &(seq[1]), naseq, l); */
for (i=1; i<=l; i++) {
char res = toupper(seq[i]);
if (isgap(res)) {
naseq[i] = GAP_POS2; /* gap in input */
} else {
naseq[i] = res_index(res_codes, res);
}
/*LOG_DEBUG("Character '%c' at pos %d", res, i);*/
if (-1 == naseq[i]) {
seq_contains_unknown_char = TRUE;
/*LOG_DEBUG("Unknown character '%c' at pos %d", res, i);*/
}
/*LOG_DEBUG("na_seq[%d]=%d", i, naseq[i]);*/
}
if (TRUE == seq_contains_unknown_char)
Log(&rLog, LOG_WARN, "Unknown character in seq '%s'", &(seq[1]));
naseq[i] = END_MARK;
return;
}
/* end of encode */
/**
*
*/
static int
res_index(const char *t, char c)
{
register int i;
for (i=0; t[i] && t[i] != c; i++)
;
if (t[i]) {
return (i);
} else {
return -1;
}
}
/* end of res_index */
/**
*
*/
static void
make_ptrs(int *tptr, int *pl, const int naseq, const int l, const int ktup, const int max_res_code, char **seq_array)
{
/* FIXME make 10 a constant and give it a nice name */
static int a[10];
int i, j, code, flag;
char residue;
const int limit = (int) pow((double)(max_res_code+1),(double)ktup);
for (i=1;i<=ktup;i++)
a[i] = (int) pow((double)(max_res_code+1),(double)(i-1));
for (i=1; i<=limit; ++i)
pl[i]=0;
for (i=1; i<=l; ++i)
tptr[i]=0;
for (i=1; i<=(l-ktup+1); ++i) {
code=0;
flag=FALSE;
for (j=1; j<=ktup; ++j) {
/* Log(&rLog, LOG_FORCED_DEBUG, "naseq=%d i=%d j=%d seq_array[naseq]=%p",
* naseq, i, j, seq_array[naseq]);
*/
residue = seq_array[naseq][i+j-1];
/* Log(&rLog, LOG_FORCED_DEBUG, "residue = %d", residue); */
if ((residue<0) || (residue > max_res_code)){
flag=TRUE;
break;
}
code += ((residue) * a[j]);
}
if (flag)
continue;
++code;
if (0 != pl[code])
tptr[i] =pl[code];
pl[code] = i;
}
return;
}
/* end of make_ptrs */
/**
*
* FIXME Why hardcoding of 5?
*/
static void
put_frag(const int fs, const int v1, const int v2, const int flen, const int curr_frag, int *next, int *maxsf, int **accum)
{
int end;
accum[0][curr_frag]=fs;
accum[1][curr_frag]=v1;
accum[2][curr_frag]=v2;
accum[3][curr_frag]=flen;
if (!*maxsf) {
*maxsf=1;
accum[4][curr_frag]=0;
return;
}
if (fs >= accum[0][*maxsf]) {
accum[4][curr_frag]=*maxsf;
*maxsf=curr_frag;
return;
} else {
*next=*maxsf;
while (TRUE) {
end=*next;
*next=accum[4][*next];
if (fs>=accum[0][*next])
break;
}
accum[4][curr_frag]=*next;
accum[4][end]=curr_frag;
}
return;
}
/* end of put_frag */
/**
*
*/
static bool
frag_rel_pos(int a1, int b1, int a2, int b2, int ktup)
{
if (a1-b1 == a2-b2) {
if (a2<a1) {
return TRUE;
}
} else {
if (a2+ktup-1<a1 && b2+ktup-1<b1) {
return TRUE;
}
}
return FALSE;
}
/* end of frag_rel_pos */
/**
*
* @note: This is together with des_quick_sort most time consuming
* routine according to gprof on r110. Tried to replace it with qsort
* and/or QSortAndTrackIndex(), which is always slower! So we keep the
* original.
*
* Original doc: Quicksort routine, adapted from chapter 4, page 115
* of software tools by Kernighan and Plauger, (1986). Sort the
* elements of array1 and sort the elements of array2 accordingly
*
* There might be a bug here. The original function apparently never
* touches the last element and keeps it as is. Tried to fix this (see
* SORT_LAST_ELEMENT_AS_WELL) which gives slightly worse performance
* (-0.5% on BB). My fix might not be working or it's not a bug at
* all...
*
*
*
*/
static void
des_quick_sort(int *array1, int *array2, const int array_size)
{
int temp1, temp2;
int p, pivlin;
int i, j;
int lst[50], ust[50]; /* the maximum no. of elements must be*/
/* < log(base2) of 50 */
#if 0
for (i=1; i<=array_size; i++) {
Log(&rLog, LOG_FORCED_DEBUG, "b4 sort array1[%d]=%d array2[%d]=%d", i, array1[i], i, array2[i]);
}
#endif
lst[1] = 1;
#ifdef SORT_LAST_ELEMENT_AS_WELL
ust[1] = array_size;
#else
/* original */
ust[1] = array_size-1;
#endif
p = 1;
while (p > 0) {
if (lst[p] >= ust[p]) {
p--;
} else {
i = lst[p] - 1;
j = ust[p];
pivlin = array1[j];
while (i < j) {
for (i=i+1; array1[i] < pivlin; i++)
;
for (j=j-1; j > i; j--)
if (array1[j] <= pivlin) break;
if (i < j) {
temp1 = array1[i];
array1[i] = array1[j];
array1[j] = temp1;
temp2 = array2[i];
array2[i] = array2[j];
array2[j] = temp2;
}
}
j = ust[p];
temp1 = array1[i];
array1[i] = array1[j];
array1[j] = temp1;
temp2 = array2[i];
array2[i] = array2[j];
array2[j] = temp2;
if (i-lst[p] < ust[p] - i) {
lst[p+1] = lst[p];
ust[p+1] = i - 1;
lst[p] = i + 1;
} else {
lst[p+1] = i + 1;
ust[p+1] = ust[p];
ust[p] = i - 1;
}
p = p + 1;
}
}
#if 0
for (i=1; i<=array_size; i++) {
Log(&rLog, LOG_FORCED_DEBUG, "after sort array1[%d]=%d array2[%d]=%d", i, array1[i], i, array2[i]);
}
#endif
return;
}
/* end of des_quick_sort */
/**
*
* FIXME together with des_quick_sort most time consuming routine
* according to gprof on r110
*
*/
static void
pair_align(int seq_no, int l1, int l2, int max_res_code, ktuple_param_t *aln_param,
char **seq_array, int *maxsf, int **accum, int max_aln_length,
int *zza, int *zzb, int *zzc, int *zzd)
{
int next; /* forrmerly static */
int pot[8],i, j, l, m, flag, limit, pos, vn1, vn2, flen, osptr, fs;
int tv1, tv2, encrypt, subt1, subt2, rmndr;
char residue;
int *diag_index;
int *displ;
char *slopes;
int curr_frag;
const int tl1 = (l1+l2)-1;
assert(NULL!=aln_param);
/*
Log(&rLog, LOG_FORCED_DEBUG, "DNAFLAG=%d seq_no=%d l1=%d l2=%d window=%d ktup=%d signif=%d wind_gap=%d",
DNAFLAG, seq_no, l1, l2, window, ktup, signif,
wind_gap);
*/
slopes = (char *) CKCALLOC(tl1+1, sizeof(char));
displ = (int *) CKCALLOC(tl1+1, sizeof(int));
diag_index = (int *) CKMALLOC((tl1+1) * sizeof(int));
for (i=1; i<=tl1; ++i) {
/* unnecessary, because we calloced: slopes[i] = displ[i] = 0; */
diag_index[i] = i;
}
for (i=1;i<=aln_param->ktup;i++)
pot[i] = (int) pow((double)(max_res_code+1),(double)(i-1));
limit = (int) pow((double)(max_res_code+1),(double)aln_param->ktup);
/* increment diagonal score for each k_tuple match */
for (i=1; i<=limit; ++i) {
vn1=zzc[i];
while (TRUE) {
if (!vn1) break;
vn2 = zzd[i];
while (0 != vn2) {
osptr = vn1-vn2+l2;
++displ[osptr];
vn2 = zzb[vn2];
}
vn1=zza[vn1];
}
}
/* choose the top SIGNIF diagonals
*/
#ifdef QSORT_REPLACEMENT
/* This was an attempt to replace des_quick_sort with qsort(),
* which turns out to be much slower, so don't use this
*/
/* FIXME: if we use this branch, we don't need to init diag_index
* before, because that is done in QSortAndTrackIndex()
* automatically.
*/
#if 0
for (i=1; i<=tl1; i++) {
Log(&rLog, LOG_FORCED_DEBUG, "b4 sort disp[%d]=%d diag_index[%d]=%d", i, diag_index[i], i, displ[i]);
}
#endif
QSortAndTrackIndex(&(diag_index[1]), &(displ[1]), tl1, 'a', TRUE);
#if 0
for (i=1; i<=tl1; i++) {
Log(&rLog, LOG_FORCED_DEBUG, "after sort disp[%d]=%d diag_index[%d]=%d", i, diag_index[i], i, displ[i]);
}
#endif
#else
des_quick_sort(displ, diag_index, tl1);
#endif
j = tl1 - aln_param->signif + 1;
if (j < 1) {
j = 1;
}
/* flag all diagonals within WINDOW of a top diagonal */
for (i=tl1; i>=j; i--) {
if (displ[i] > 0) {
pos = diag_index[i];
l = (1 > pos - aln_param->window) ?
1 : pos - aln_param->window;
m = (tl1 < pos + aln_param->window) ?
tl1 : pos + aln_param->window;
for (; l <= m; l++)
slopes[l] = 1;
}
}
for (i=1; i<=tl1; i++) {
displ[i] = 0;
}
curr_frag=*maxsf=0;
for (i=1; i<=(l1-aln_param->ktup+1); ++i) {
encrypt=flag=0;
for (j=1; j<=aln_param->ktup; ++j) {
residue = seq_array[seq_no][i+j-1];
if ((residue<0) || (residue>max_res_code)) {
flag=TRUE;
break;
}
encrypt += ((residue)*pot[j]);
}
if (flag) {
continue;
}
++encrypt;
vn2=zzd[encrypt];
flag=FALSE;
while (TRUE) {
if (!vn2) {
flag=TRUE;
break;
}
osptr=i-vn2+l2;
if (1 != slopes[osptr]) {
vn2=zzb[vn2];
continue;
}
flen=0;
fs=aln_param->ktup;
next=*maxsf;
/*
* A-loop
*/
while (TRUE) {
if (!next) {
++curr_frag;
if (curr_frag >= 2*max_aln_length) {
Log(&rLog, LOG_VERBOSE, "(Partial alignment)");
goto free_and_exit; /* Yesss! Always wanted to
* use a goto (AW) */
}
displ[osptr]=curr_frag;
put_frag(fs, i, vn2, flen, curr_frag, &next, maxsf, accum);
} else {
tv1=accum[1][next];
tv2=accum[2][next];
if (frag_rel_pos(i, vn2, tv1, tv2, aln_param->ktup)) {
if (i-vn2 == accum[1][next]-accum[2][next]) {
if (i > accum[1][next]+(aln_param->ktup-1)) {
fs = accum[0][next]+aln_param->ktup;
} else {
rmndr = i-accum[1][next];
fs = accum[0][next]+rmndr;
}
flen=next;
next=0;
continue;
} else {
if (0 == displ[osptr]) {
subt1=aln_param->ktup;
} else {
if (i > accum[1][displ[osptr]]+(aln_param->ktup-1)) {
subt1=accum[0][displ[osptr]]+aln_param->ktup;
} else {
rmndr=i-accum[1][displ[osptr]];
subt1=accum[0][displ[osptr]]+rmndr;
}
}
subt2=accum[0][next] - aln_param->wind_gap + aln_param->ktup;
if (subt2>subt1) {
flen=next;
fs=subt2;
} else {
flen=displ[osptr];
fs=subt1;
}
next=0;
continue;
}
} else {
next=accum[4][next];
continue;
}
}
break;
}
/*
* End of Aloop
*/
vn2=zzb[vn2];
}
}
free_and_exit:
CKFREE(displ);
CKFREE(slopes);
CKFREE(diag_index);
return;
}
/* end of pair_align */
/**
*
* Will compute ktuple scores and store in tmat
* Following values will be set: tmat[i][j], where
* istart <= i <iend
* and
* jstart <= j < jend
* i.e. zero-offset
* tmat data members have to be preallocated
*
* if ktuple_param_t *aln_param == NULL defaults will be used
*/
void
KTuplePairDist(symmatrix_t *tmat, mseq_t *mseq,
int istart, int iend,
int jstart, int jend,
ktuple_param_t *param_override,
progress_t *prProgress,
unsigned long int *ulStepNo, unsigned long int ulTotalStepNo)
{
/* this first group of variables were previously static
and hence un-parallelisable */
char **seq_array;
int maxsf;
int **accum;
int max_aln_length;
/* divide score with length of smallest sequence */
int *zza, *zzb, *zzc, *zzd;
int private_step_no = 0;
int i, j, dsr;
double calc_score;
int max_res_code = -1;
int max_seq_len;
int *seqlen_array;
/* progress_t *prProgress; */
/* int uStepNo, uTotalStepNo; */
ktuple_param_t aln_param = default_protein_param;
bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? FALSE : TRUE;
if(prProgress == NULL) {
NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO),
"Ktuple-distance calculation progress", bPrintCR);
}
/* conversion to old style data types follows
*
*/
seqlen_array = (int*) CKMALLOC((mseq->nseqs+1) * sizeof(int));
for (i=0; i<mseq->nseqs; i++) {
seqlen_array[i+1] = mseq->sqinfo[i].len;
}
/* setup alignment parameters
*/
if (SEQTYPE_PROTEIN == mseq->seqtype) {
DNAFLAG = FALSE;
max_res_code = strlen(AMINO_ACID_CODES)-2;
aln_param = default_protein_param;
} else if (SEQTYPE_RNA == mseq->seqtype || SEQTYPE_DNA == mseq->seqtype) {
DNAFLAG = TRUE;
max_res_code = strlen(NUCLEIC_ACID_CODES)-2;
aln_param = default_dna_param;
} else {
Log(&rLog, LOG_FATAL, "Internal error in %s: Unknown sequence type.", __FUNCTION__);
}
if (NULL!=param_override) {
aln_param.ktup = param_override->ktup;
aln_param.wind_gap = param_override->wind_gap;
aln_param.signif = param_override->signif;
aln_param.window = param_override->window;
}
/*LOG_DEBUG("DNAFLAG = %d max_res_code = %d", DNAFLAG, max_res_code);*/
/* convert mseq to clustal's old-style int encoded sequences (unit-offset)
*/
max_aln_length = 0;
max_seq_len = 0;
seq_array = (char **) CKMALLOC((mseq->nseqs+1) * sizeof(char *));
seq_array[0] = NULL;
/* FIXME check that non of the seqs is smaller than ktup (?).
* Otherwise segfault occurs
*/
for (i=0; i<mseq->nseqs; i++) {
seq_array[i+1] = (char *) CKMALLOC((seqlen_array[i+1]+2) * sizeof (char));;
}
for (i=0; i<mseq->nseqs; i++) {
/*LOG_DEBUG("calling encode with seq_array[%d+1] len=%d and seq=%s",
i, seqlen_array[i+1], mseq->seq[i]);*/
if (TRUE == DNAFLAG) {
encode(&(mseq->seq[i][-1]), seq_array[i+1],
seqlen_array[i+1], NUCLEIC_ACID_CODES);
} else {
encode(&(mseq->seq[i][-1]), seq_array[i+1],
seqlen_array[i+1], AMINO_ACID_CODES);
}
if (seqlen_array[i+1]>max_seq_len) {
max_seq_len = seqlen_array[i+1];
}
}
max_aln_length = max_seq_len * 2;
/* see sequence.c in old source */
/* FIXME: short sequences can cause seg-fault
* because max_aln_length can get shorter
* than (max_res_code+1)^k
* FS, r222->r223 */
max_aln_length = max_aln_length > pow((max_res_code+1), aln_param.ktup)+1 ?
max_aln_length : pow((max_res_code+1), aln_param.ktup)+1;
/*
*
* conversion to old style clustal done (in no time) */
accum = (int **) CKCALLOC(5, sizeof (int *));
for (i=0;i<5;i++) {
accum[i] = (int *) CKCALLOC((2*max_aln_length+1), sizeof(int));
}
zza = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
zzb = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
zzc = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
zzd = (int *) CKCALLOC( (max_aln_length+1), sizeof(int));
/* estimation of total number of steps (if istart and jstart are
* both 0) (now handled in the calling routine)
*/
/* uTotalStepNo = iend*jend - iend*iend/2 + iend/2;
uStepNo = 0; */
/*LOG_DEBUG("istart=%d iend=%d jstart=%d jend=%d", istart, iend, jstart, jend);*/
for (i=istart+1; i<=iend; ++i) {
/* by definition a sequence compared to itself should give
a score of 0. AW */
SymMatrixSetValue(tmat, i-1, i-1, 0.0);
make_ptrs(zza, zzc, i, seqlen_array[i], aln_param.ktup, max_res_code, seq_array);
#ifdef HAVE_OPENMP
#pragma omp critical(ktuple)
#endif
{
ProgressLog(prProgress, *ulStepNo, ulTotalStepNo, FALSE);
}
for (j=MAX(i+1, jstart+1); j<=jend; ++j) {
(*ulStepNo)++;
private_step_no++;
/*LOG_DEBUG("comparing pair %d:%d", i, j);*/
make_ptrs(zzb, zzd, j, seqlen_array[j], aln_param.ktup, max_res_code, seq_array);
pair_align(i, seqlen_array[i], seqlen_array[j], max_res_code, &aln_param,
seq_array, &maxsf, accum, max_aln_length, zza, zzb, zzc, zzd);
if (!maxsf) {
calc_score=0.0;
} else {
calc_score=(double)accum[0][maxsf];
if (percent) {
dsr=(seqlen_array[i]<seqlen_array[j]) ?
seqlen_array[i] : seqlen_array[j];
calc_score = (calc_score/(double)dsr) * 100.0;
}
}
/* printf("%d %d %d\n", i-1, j-1, (100.0 - calc_score)/100.0); */
SymMatrixSetValue(tmat, i-1, j-1, (100.0 - calc_score)/100.0);
/* the function allows you not to compute the full matrix.
* here we explicitely make the resulting matrix a
* rectangle, i.e. we always set full rows. in other
* words, if we don't complete the full matrix then we
* don't have a full symmetry. so only use the defined
* symmetric part. AW
*/
/*LOG_DEBUG("setting %d : %d = %f", j, i, tmat[i][j]);*/
/* not needed anymore since we use symmatrix_t
if (j<=iend) {
tmat[j][i] = tmat[i][j];
}
*/
#ifdef HAVE_OPENMP
#pragma omp critical(ktuple)
#endif
{
Log(&rLog, LOG_DEBUG, "K-tuple distance for sequence pair %d:%d = %lg",
i, j, SymMatrixGetValue(tmat, i-1, j-1));
}
}
}
/*
Log(&rLog, LOG_FORCED_DEBUG, "uTotalStepNo=%d for istart=%d iend=%d jstart=%d jend=%d", uStepNo, istart, iend, jstart, jend);
Log(&rLog, LOG_FORCED_DEBUG, "Fabian = %d", iend*jend - iend*iend/2 + iend/2);
*/
/* printf("\n\n%d\t%d\t%d\t%d\n\n", omp_get_thread_num(), uStepNo, istart, iend); */
for (i=0;i<5;i++) {
CKFREE(accum[i]);
}
CKFREE(accum);
#ifdef HAVE_OPENMP
#pragma omp critical(ktuple)
#if 0
{
printf("steps: %d\n", private_step_no);
}
#endif
#endif
CKFREE(zza);
CKFREE(zzb);
CKFREE(zzc);
CKFREE(zzd);
free(seqlen_array);
for (i=1; i<=mseq->nseqs; i++) {
CKFREE(seq_array[i]);
}
CKFREE(seq_array);
}
/* end of KTuplePairDist */
|
GB_binop__hypot_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__hypot_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__hypot_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__hypot_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__hypot_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__hypot_fp32)
// C=scalar+B GB (_bind1st__hypot_fp32)
// C=scalar+B' GB (_bind1st_tran__hypot_fp32)
// C=A+scalar GB (_bind2nd__hypot_fp32)
// C=A'+scalar GB (_bind2nd_tran__hypot_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = hypotf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = hypotf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_HYPOT || GxB_NO_FP32 || GxB_NO_HYPOT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__hypot_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__hypot_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__hypot_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__hypot_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__hypot_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = hypotf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__hypot_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = hypotf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = hypotf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__hypot_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = hypotf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__hypot_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.