source
stringlengths
3
92
c
stringlengths
26
2.25M
core_damax.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_dzamax.c, normal z -> d, Fri Sep 28 17:38:20 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
// Computes the maximum absolute value of each column (PlasmaColumnwise) or
// each row (PlasmaRowwise) of the m-by-n tile A (leading dimension lda),
// storing the n (resp. m) results in values[].  The work is wrapped in a
// single OpenMP task whose depend clauses cover the whole tile and the
// whole output vector; nothing runs if the sequence has already failed.
// NOTE(review): `request` is accepted for API symmetry but not read here.
void plasma_core_omp_damax(int colrow, int m, int n,
                           const double *A, int lda,
                           double *values,
                           plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (colrow) {
    case PlasmaColumnwise:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:values[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                // One result per column: seed with the first entry, then
                // fold in the rest of the column.
                for (int col = 0; col < n; col++) {
                    const double *column = A + lda*col;
                    double biggest = fabs(column[0]);
                    for (int row = 1; row < m; row++) {
                        double cand = fabs(column[row]);
                        if (cand > biggest)
                            biggest = cand;
                    }
                    values[col] = biggest;
                }
            }
        }
        break;
    case PlasmaRowwise:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:values[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                // One result per row: seed every row from column 0, then
                // sweep the remaining columns, keeping the running maximum.
                for (int row = 0; row < m; row++)
                    values[row] = fabs(A[row]);
                for (int col = 1; col < n; col++) {
                    const double *column = A + lda*col;
                    for (int row = 0; row < m; row++) {
                        double cand = fabs(column[row]);
                        if (cand > values[row])
                            values[row] = cand;
                    }
                }
            }
        }
        break;
    }
}
GB_unaryop__abs_int32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_int32_int16
// op(A') function: GB_tran__abs_int32_int16

// C type:   int32_t
// A type:   int16_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

// entry type of the input matrix A
#define GB_ATYPE \
    int16_t

// entry type of the output matrix C
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator (GB_IABS is defined in GB.h — presumably an integer
// absolute value; confirm overflow behavior for INT16_MIN there)
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = (int32_t) GB_IABS (Ax [p]) to each of the anz entries,
// split statically across nthreads OpenMP threads.  Returns GrB_NO_VALUE
// when this operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB_unop__abs_int32_int16
(
    int32_t *restrict Cx,        // output array, anz entries
    const int16_t *restrict Ax,  // input array, anz entries
    int64_t anz,                 // number of entries to process
    int nthreads                 // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The kernel body comes from the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros above; GB_PHASE_2_OF_2 selects the
// second phase of that template.
GrB_Info GB_tran__abs_int32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrix_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file matrix_op-inl.h * \brief Function definition of matrix related operators */ #ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_ #include <mxnet/operator_util.h> #include <vector> #include <algorithm> #include <utility> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "../channel_op_common.h" #include "../mxnet_op.h" #include "broadcast_reduce_op.h" #include "./init_op.h" #include "../../common/static_array.h" #include "./slice-inl.h" #if MXNET_USE_CUDA #include <thrust/device_vector.h> #endif #ifdef __CUDACC__ #include "./pseudo2DTranspose_op-inl.cuh" #endif namespace mxnet { namespace op { struct ReshapeParam : public dmlc::Parameter<ReshapeParam> { mxnet::TShape target_shape; bool keep_highest; mxnet::Tuple<int> shape; bool reverse; DMLC_DECLARE_PARAMETER(ReshapeParam) { DMLC_DECLARE_FIELD(shape) .set_default(mxnet::Tuple<int>()) .describe("The target shape"); DMLC_DECLARE_FIELD(reverse) .set_default(false) .describe("If true then the special values are inferred from right to left"); DMLC_DECLARE_FIELD(target_shape) .set_default(mxnet::TShape(0, -1)) 
.describe("(Deprecated! Use ``shape`` instead.) " "Target new shape. One and only one dim can be 0, " "in which case it will be inferred from the rest of dims"); DMLC_DECLARE_FIELD(keep_highest).set_default(false) .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged." "If set to true, then the first dim in target_shape is ignored," "and always fixed as input"); } bool operator==(const ReshapeParam &other) const { return this->target_shape == other.target_shape && this->keep_highest == other.keep_highest && this->shape == other.shape && this->reverse == other.reverse; } }; template<typename IType> inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape, const mxnet::TShape& dshape, bool reverse) { std::vector<IType> dshape_vec; std::vector<IType> param_shape_vec(shape.begin(), shape.end()); for (int i = 0; i < dshape.ndim(); ++i) { dshape_vec.push_back(dshape[i]); } std::vector<IType> tmp; size_t src_idx = 0; int inf_idx = -1; if (reverse) { std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(param_shape_vec.begin(), param_shape_vec.end()); } auto dshape_len = dshape_vec.size(); auto params_len = param_shape_vec.size(); for (size_t i = 0; i < params_len; ++i) { IType proposed_dim = param_shape_vec[i]; if (proposed_dim == 0) { // keep same CHECK_LT(src_idx, dshape_len); tmp.push_back(dshape_vec[src_idx++]); } else if (proposed_dim == -1) { // infer CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred"; inf_idx = i; tmp.push_back(1); src_idx++; } else if (proposed_dim == -2) { // copy all remaining dims from source while (src_idx < dshape_len) { const int dn = dshape_vec[src_idx++]; tmp.push_back(dn); } } else if (proposed_dim == -3) { // merge two dims from source CHECK_LT(src_idx, dshape_len-1); const int d1 = dshape_vec[src_idx++]; const int d2 = dshape_vec[src_idx++]; if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) { tmp.push_back(-1); } else { tmp.push_back(d1 * d2); } } 
else if (proposed_dim == -4) { // split the source dim s into two dims // read the left dim and then the right dim (either can be -1) CHECK_LT(i + 2, params_len); CHECK_LT(src_idx, dshape_len); const int d0 = dshape_vec[src_idx++]; IType d1 = param_shape_vec[++i]; IType d2 = param_shape_vec[++i]; CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1."; if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) << "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0; tmp.push_back(d1); tmp.push_back(d2); } else { // greater than 0, new shape tmp.push_back(proposed_dim); src_idx++; } } if (inf_idx >= 0) { if (shape_is_known(dshape)) { IType new_size = 1; for (IType x : tmp) new_size *= x; tmp[inf_idx] = dshape.Size() / new_size; } else { tmp[inf_idx] = -1; } } if (reverse) { std::reverse(param_shape_vec.begin(), param_shape_vec.end()); std::reverse(dshape_vec.begin(), dshape_vec.end()); std::reverse(tmp.begin(), tmp.end()); } mxnet::TShape oshape(tmp.begin(), tmp.end()); return oshape; } inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) { if (shape_is_known(*in) && shape_is_known(out)) { return true; } else if (!shape_is_known(out)) { return false; } else { int zero_axis = -1; int known_dim_size_prod = 1; for (int i = 0; i < in->ndim(); i++) { if (!mxnet::dim_size_is_known(*in, i)) { if (zero_axis != -1) return false; // more than 1 zero found. 
else zero_axis = i; } else { known_dim_size_prod *= (*in)[i]; } } (*in)[zero_axis] = out.Size() / known_dim_size_prod; return true; } } inline bool ReshapeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape &dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape oshape; if (param_.shape.ndim() != 0) { oshape = InferReshapeShape(param_.shape, dshape, param_.reverse); } else if (param_.target_shape.ndim() != -1) { LOG(INFO) << "Using target_shape will be deprecated."; oshape = param_.target_shape; int neg_count = 0; index_t inf_idx = 0; index_t start_idx = param_.keep_highest ? 1 : 0; if (param_.keep_highest) { oshape[0] = dshape[0]; } for (int i = start_idx; i < oshape.ndim(); ++i) { if (oshape[i] == 0) { neg_count++; inf_idx = i; } } if (neg_count == 1) { oshape[inf_idx] = 1; oshape[inf_idx] = dshape.Size() / oshape.Size(); } } else { return shape_is_known((*out_attrs)[0]) && ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]); } ReverseReshapeInferShape(&dshape, oshape); #if 0 CHECK_EQ(oshape.Size(), dshape.Size()) << "Target shape size is different to source. 
" << "Target: " << oshape << "\nSource: " << dshape; #endif SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]); } inline bool FlattenShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]"; CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape &dshape = (*in_attrs)[0]; if (!shape_is_known(dshape)) return false; int target_dim = 1; for (int i = 1; i < dshape.ndim(); ++i) { target_dim *= dshape[i]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim)); return true; } struct TransposeParam : public dmlc::Parameter<TransposeParam> { mxnet::TShape axes; DMLC_DECLARE_PARAMETER(TransposeParam) { DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1)) .describe("Target axis order. By default the axes will be inverted."); } bool operator==(const TransposeParam &other) const { return this->axes == other.axes; } }; /*! * \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache * \param in input tensor * \param out output tensor * \param row shape of dim 0 of input * \param col shape of dim 1 of input * \tparam DType Data type * \tparam is_addto */ template<typename DType, bool is_addto> MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) { // ensure cache line hits and prevent cache miss for any configuration // L1 cache size to be utilized = 32kb = 2^15 // Largest size of a single unit of any dtype <= 8 byte = 2^3 // Number of elements - (2^15/2^3) = 2^12 // Block-size - 2^6 v 2^6 (64 v 64) // But we could leverage unrolling of for loops (for parallelization) // Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled // blocksize * blocksize * num_threads = cache_size / dtype_size // Instead of explicit unroll, let compiler figure out optimal unroll factor const index_t blocksize = 32; // collapse 2 parallelizes 2 
for loops // inner 2 for loops aren't parallelized to prevent cache miss // Microsoft Visual C++ compiler does not support omp collapse #ifdef _MSC_VER #pragma omp parallel for #else #pragma omp parallel for collapse(2) #endif // _MSC_VER for (index_t i = 0; i < row; i += blocksize) { for (index_t j = 0; j < col; j += blocksize) { // transpose the block for (index_t a = j; (a < blocksize + j) && (a < col); ++a) { for (index_t b = i; (b < blocksize + i) && (b < row); ++b) { if (!is_addto) { out[a * row + b] = in[b * col + a]; } else { out[a * row + b] += in[b * col + a]; } } } } } } inline bool IsIdentityTranspose(const TShape& axes) { for (dim_t i = 0; i < axes.ndim(); i++) { if (axes[i] != i) return false; } return true; } template<typename xpu, bool is_addto = false> void TransposeImpl(RunContext ctx, const TBlob& src, const TBlob& ret, const mxnet::TShape& axes) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(src.type_flag_, ret.type_flag_); // zero-size tensor, no need to compute if (src.shape_.Size() == 0U) return; Stream<xpu> *s = ctx.get_stream<xpu>(); #ifdef __CUDACC__ // This transpose can be used only if there exist n and m such that: // params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1) // Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3). 
if (isPseudo2DTranspose(axes)) { MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { transpose_pseudo2D<DType, is_addto>(ret, src, axes, s); }); return; } #endif // Special handle the identity case if (IsIdentityTranspose(axes)) { MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s); Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s); if (!is_addto) { // Use memcpy to accelerate the speed Copy(out, in, s); } else { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch( s, ret.Size(), out.dptr_, in.dptr_); } }); return; } // Handle the general transpose case MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, { switch (axes.ndim()) { case 2: { Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s); Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s); if (ctx.get_ctx().dev_mask() == cpu::kDevMask) { Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]); } else { LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case " "in GPU has been covered by transpose_pseudo2D." 
" Report an issue in Github."; } break; } case 3: { Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s); Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s); if (!is_addto) { out = transpose(in, axes.get<3>()); } else { out += transpose(in, axes.get<3>()); } break; } case 4: { Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s); Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s); if (!is_addto) { out = transpose(in, axes.get<4>()); } else { out += transpose(in, axes.get<4>()); } break; } case 5: { Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s); Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s); if (!is_addto) { out = transpose(in, axes.get<5>()); } else { out += transpose(in, axes.get<5>()); } break; } case 6: { Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s); Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s); if (!is_addto) { out = transpose(in, axes.get<6>()); } else { out += transpose(in, axes.get<6>()); } break; } default: LOG(FATAL) << "Transpose support at most 6 dimensions"; break; } }); } // matrix transpose template<typename xpu> void Transpose(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { if (req[0] == kNullOp) { return; } const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed); CHECK(req[0] == kWriteTo || req[0] == kAddTo) << "Transpose only supports kNullOp, kWriteTo and kAddTo"; mxnet::TShape axes; if (param.axes.ndim() == 0) { axes = mxnet::TShape(inputs[0].ndim(), -1); for (int i = 0; i < axes.ndim(); ++i) { axes[i] = axes.ndim() - 1 - i; } } else { axes = common::CanonicalizeAxes(param.axes); } if (req[0] == kAddTo) { TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes); } else { TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes); } } inline bool TransposeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const 
TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& shp = (*in_attrs)[0]; mxnet::TShape& out_shp = (*out_attrs)[0]; CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions"; if (shp.ndim() == -1 && out_shp.ndim() == -1) return false; // none of the shapes is known if (out_shp.ndim() >= 0 && shp.ndim() >= 0) CHECK_EQ(out_shp.ndim(), shp.ndim()); mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1); mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1); if (param.axes.ndim() == 0) { for (int i = 0; i < shp.ndim(); ++i) { ret[i] = shp[shp.ndim()-1-i]; } for (int i = 0; i < out_shp.ndim(); ++i) { get[shp.ndim()-1-i] = out_shp[i]; } } else { CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim()); for (int i = 0; i < shp.ndim(); ++i) { CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim())); ret[i] = shp[param.axes[i]]; } for (int i = 0; i < out_shp.ndim(); ++i) { get[param.axes[i]] = out_shp[i]; } } SHAPE_ASSIGN_CHECK(*in_attrs, 0, get); SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); return shape_is_known(ret); } struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> { int axis; DMLC_DECLARE_PARAMETER(ExpandDimParam) { DMLC_DECLARE_FIELD(axis) .describe("Position where new axis is to be inserted. 
Suppose that " "the input `NDArray`'s dimension is `ndim`, the range of " "the inserted axis is `[-ndim, ndim]`"); } bool operator==(const ExpandDimParam &other) const { return this->axis == other.axis; } }; inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) { return false; } mxnet::TShape& ishape = (*in_attrs)[0]; mxnet::TShape& oshape = (*out_attrs)[0]; int indim = ishape.ndim(); bool unknown_ishape = false; if (-1 == indim) { indim = oshape.ndim() - 1; unknown_ishape = true; } int axis = param.axis; if (axis < 0) { axis += indim + 1; } CHECK(axis >= 0 && axis <= indim) << "axis must be in the range [" << -indim << ", " << indim << "] (" << param.axis << " provided)"; mxnet::TShape ret(indim + 1, -1); for (int i = 0; i < axis; ++i) { ret[i] = (unknown_ishape? -1 : ishape[i]); } ret[axis] = 1; for (int i = axis+1; i < indim+1; ++i) { ret[i] = (unknown_ishape? 
-1 : ishape[i-1]); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret); ret = mxnet::TShape(indim, -1); for (int i = 0; i < axis; ++i) ret[i] = oshape[i]; for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i]; SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret); return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0)); } // Currently MKLDNN only supports step = 1 or step has no value inline bool SupportMKLDNNSlice(const SliceParam& param) { if (param.step.ndim() == 0U) return true; for (int i = 0; i < param.step.ndim(); ++i) { if (param.step[i].has_value() && param.step[i].value() != 1) return false; } return true; } inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1); CHECK_EQ(out_attrs->size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); const auto& in_stype = in_attrs->at(0); auto& out_stype = out_attrs->at(0); bool dispatched = false; const auto dispatch_ex = DispatchMode::kFComputeEx; // If step = 1, no need to fallback; otherwise fallback to dense bool trivial_step = false; if (param.step.ndim() == 0U) { trivial_step = true; } else if (param.step.ndim() == 1U && (!param.step[0].has_value() || param.step[0].value() == 1)) { trivial_step = true; } if (in_stype == kDefaultStorage) { #if MXNET_USE_MKLDNN == 1 if (dev_mask == Context::kCPU && MKLDNNEnvSet() && SupportMKLDNNSlice(param)) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex); } #endif if (!dispatched) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } } if (!dispatched && in_stype == kCSRStorage && trivial_step) { dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } // slice the indptr of a csr 
struct SliceCsrIndPtr { template<typename IType> MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) { KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base); } }; /* * a wrapper to launch SliceCsrIndPtr kernel. * slice [src[begin] .. src[end]) and store in dst[0, end - begin) */ template<typename xpu, typename IType> void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx, const IType* src, IType* dst) { using namespace mshadow; using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); int indptr_len = end - begin + 1; Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin); } /* * Slice a CSR NDArray for first dimension */ template<typename xpu> void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx, const NDArray &in, const NDArray &out) { using namespace mshadow; using namespace mxnet_op; using namespace csr; nnvm::dim_t begin_row = begin[0]; nnvm::dim_t end_row = end[0]; nnvm::dim_t indptr_len = end_row - begin_row + 1; out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len)); // assume idx indptr share the same type MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, { MSHADOW_TYPE_SWITCH(in.dtype(), DType, { RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>(); RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>(); SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr); Stream<xpu> *s = ctx.get_stream<xpu>(); RType nnz = 0; mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)), Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s)); // return csr zeros if nnz = 0 if (nnz == 0) { out.set_aux_shape(kIdx, Shape1(0)); return; } // copy indices and values out.CheckAndAllocAuxData(kIdx, Shape1(nnz)); out.CheckAndAllocData(Shape1(nnz)); IType* in_idx = in.aux_data(kIdx).dptr<IType>(); IType* out_idx = out.aux_data(kIdx).dptr<IType>(); DType* in_data = 
in.data().dptr<DType>(); DType* out_data = out.data().dptr<DType>(); RType offset = 0; mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)), Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s)); mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s), Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s); mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s), Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s); }); }); }); } /*! * \brief slice a CSRNDArray for two dimensions */ struct SliceDimTwoCsrAssign { /*! * \brief This function slices a CSRNDArray on axis one between begin_col and end_col * \param i loop index * \param out_idx output csr ndarray column indices * \param out_data output csr ndarray data * \param out_indptr output csr ndarray row index pointer * \param in_idx input csr ndarray column indices * \param in_data input csr ndarray data * \param in_indptr input csr ndarray row index pointer * \param begin_col begin column indice * \param end_col end column indice */ template<typename IType, typename RType, typename DType> MSHADOW_XINLINE static void Map(int i, IType* out_idx, DType* out_data, const RType* out_indptr, const IType* in_idx, const DType* in_data, const RType* in_indptr, const int begin_col, const int end_col) { RType ind = out_indptr[i]; for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) { // indices of CSRNDArray are in ascending order per row if (in_idx[j] >= end_col) { break; } else if (in_idx[j] >= begin_col) { out_idx[ind] = in_idx[j] - begin_col; out_data[ind] = in_data[j]; ind++; } } } }; /* * Slice a CSR NDArray for two dimensions */ template<typename xpu> void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx, const NDArray &in, const NDArray &out); template<typename xpu> void SliceCsrImpl(const SliceParam &param, const OpContext& ctx, const NDArray &in, OpReqType req, const NDArray &out) { if (req == kNullOp) return; CHECK_NE(req, kAddTo) << 
"kAddTo for Slice on CSR input is not supported"; CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported"; const mxnet::TShape ishape = in.shape(); const mxnet::TShape oshape = out.shape(); int N = ishape.ndim(); mxnet::TShape begin(N, -1), end(N, -1); for (int i = 0; i < N; ++i) { int s = 0; if (i < param.begin.ndim() && param.begin[i]) { s = *param.begin[i]; if (s < 0) s += ishape[i]; } begin[i] = s; end[i] = s + oshape[i]; } switch (N) { case 1: { SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out); break; } case 2: { SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out); break; } default: LOG(FATAL) << "CSR is only for 2-D shape"; break; } } template<typename xpu> void SliceEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { CHECK_EQ(inputs.size(), 1); CHECK_EQ(outputs.size(), 1); const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); auto in_stype = inputs[0].storage_type(); if (in_stype == kCSRStorage) { SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]); } else { LOG(FATAL) << "Slice not implemented for storage type" << in_stype; } } template<int ndim> inline bool GetIndexRange(const mxnet::TShape& dshape, const mxnet::Tuple<dmlc::optional<index_t>>& param_begin, const mxnet::Tuple<dmlc::optional<index_t>>& param_end, const mxnet::Tuple<dmlc::optional<index_t>>& param_step, common::StaticArray<index_t, ndim>* begin, common::StaticArray<index_t, ndim>* end, common::StaticArray<index_t, ndim>* step) { // Function returns false if output is zero-sized, true otherwise. 
bool zero_size_shape = false; CHECK_NE(dshape.ndim(), 0U); CHECK_LE(param_begin.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_LE(param_end.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions"; CHECK_EQ(param_begin.ndim(), param_end.ndim()) << "begin and end must have the same length"; CHECK_EQ(ndim, dshape.ndim()) << "Static array size=" << ndim << " is not equal to data shape ndim=" << dshape.ndim(); if (param_step.ndim() > 0) { CHECK_EQ(param_step.ndim(), param_begin.ndim()) << "step and begin must have the same length"; } for (int i = 0; i < param_begin.ndim(); ++i) { index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1; CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0"; index_t b = 0, e = 0; const index_t len = dshape[i]; if (len > 0) { b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0); e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len); if (b < 0) { b += len; } if (e < 0 && param_end[i].has_value()) { e += len; } // move the begin and end to correct position for calculating dim size b = (b < 0 && s > 0) ? 0 : b; b = (b > len - 1 && s < 0) ? len - 1 : b; // if the start value lead to empty tensor under step s, use -1 for indication b = (b < 0 || b > len - 1) ? -1 : b; e = e > -1 ? e : -1; e = e > len ? len : e; } else if (len == 0) { b = 0; e = 0; } (*begin)[i] = b; (*end)[i] = e; (*step)[i] = s; // checking begin==end if (b == e) { zero_size_shape = true; } } for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) { (*begin)[i] = 0; (*end)[i] = dshape[i]; (*step)[i] = 1; } return zero_size_shape; } inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape, const index_t i, const index_t b, const index_t e, const index_t s, mxnet::TShape* oshape) { if (!mxnet::dim_size_is_known(dshape, i)) { (*oshape)[i] = -1; return; } if (e != b && b >= 0) { if (s > 0) { (*oshape)[i] = e > b ? 
(e - b - 1) / s + 1 : 0; } else { (*oshape)[i] = e < b ? (b - e - 1) / (-s) + 1 : 0; } } else { (*oshape)[i] = 0; } } inline bool SliceOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0"; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); mxnet::TShape oshape = dshape; MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const index_t b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape); } }) SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(dshape) && shape_is_known(oshape); } template<int ndim, int req, typename xpu> struct slice_forward; template<int ndim, int req> struct slice_forward<ndim, req, gpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, const mshadow::Shape<ndim> dshape, const mshadow::Shape<ndim> oshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = dshape[ndim-1]; const index_t out_last_dim_size = oshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; const index_t j = i % out_last_dim_size; index_t irow = 0; // row id of flattend 2D data index_t stride = 1; index_t idx = i / out_last_dim_size; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % oshape[k]) * step[k] + begin[k]); idx /= oshape[k]; stride *= dshape[k]; } KERNEL_ASSIGN(out[i], req, data[irow * data_last_dim_size + j * 
step_last_dim + begin_last_dim]); } }; template<int ndim, int req> struct slice_forward<ndim, req, cpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data, const mshadow::Shape<ndim> dshape, const mshadow::Shape<ndim> oshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = dshape[ndim-1]; const index_t out_last_dim_size = oshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; index_t out_offset = i * out_last_dim_size; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D data index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % oshape[k]) * step[k] + begin[k]); idx /= oshape[k]; stride *= dshape[k]; } KERNEL_ASSIGN(out[out_offset++], req, data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]); } } }; template<typename xpu> void SliceOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); if (req[0] == kNullOp) return; using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; if (out.Size() == 0) return; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { size_t num_threads = out.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= out.shape_.get<ndim>()[ndim - 1]; } 
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), data.dptr<DType>(), data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step); }) }) }) } template<int ndim, int req, typename xpu> struct slice_assign; template<int ndim, int req> struct slice_assign<ndim, req, cpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; index_t offset = i * out_last_dim_size; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[offset++]); } } }; template<int ndim, int req> struct slice_assign<ndim, req, gpu> { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t begin_last_dim = begin[ndim-1]; const index_t j = i % out_last_dim_size; index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i / out_last_dim_size; 
#pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[i]); } }; template<typename xpu> void SliceOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); if (req[0] == kNullOp) return; using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); const TBlob& ograd = inputs[0]; const TBlob& igrad = outputs[0]; const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); if (req[0] == kWriteTo) { Fill(s, igrad, req[0], 0); } else if (req[0] == kWriteInplace) { LOG(FATAL) << "_slice_backward does not support kWriteInplace"; } if (ograd.Size() == 0) return; MXNET_NDIM_SWITCH(ograd.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 2U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(dshape)) return false; mxnet::TShape vshape = dshape; // vshape is the value shape on the right hand side const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); 
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step); for (int i = 0; i < param.begin.ndim(); ++i) { const int b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape); } }) SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<typename xpu> void SliceAssignOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 2U); // data[index] = val, data and val are two inputs CHECK_EQ(outputs.size(), 1U); if (req[0] == kNullOp) return; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& val = inputs[1]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace"; } const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of zero-sized subspace needs no operation. 
} MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = val.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= val.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), val.dptr<DType>(), out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step); }) }) }) } struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> { double scalar; mxnet::Tuple<dmlc::optional<index_t>> begin, end; mxnet::Tuple<dmlc::optional<index_t>> step; DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) { DMLC_DECLARE_FIELD(scalar) .set_default(0) .describe("The scalar value for assignment."); DMLC_DECLARE_FIELD(begin) .describe("starting indices for the slice operation, supports negative indices."); DMLC_DECLARE_FIELD(end) .describe("ending indices for the slice operation, supports negative indices."); DMLC_DECLARE_FIELD(step) .set_default(mxnet::Tuple<dmlc::optional<index_t>>()) .describe("step for the slice operation, supports negative values."); } }; inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& dshape = (*in_attrs)[0]; if (!shape_is_known(dshape)) return false; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<int ndim> struct slice_assign_scalar { // i is the i-th row after flattening out into 2D tensor template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val, const OpReqType req, const mshadow::Shape<ndim> oshape, const mshadow::Shape<ndim> vshape, const common::StaticArray<index_t, ndim> begin, const common::StaticArray<index_t, ndim> step) { const index_t data_last_dim_size = oshape[ndim-1]; const index_t out_last_dim_size = vshape[ndim-1]; const index_t step_last_dim = step[ndim-1]; const index_t 
begin_last_dim = begin[ndim-1]; for (index_t j = 0; j < out_last_dim_size; ++j) { index_t irow = 0; // row id of flattend 2D out index_t stride = 1; index_t idx = i; #pragma unroll for (int k = ndim - 2; k >= 0; --k) { irow += stride * ((idx % vshape[k]) * step[k] + begin[k]); idx /= vshape[k]; stride *= oshape[k]; } KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val); } } }; template<typename xpu> void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); using namespace mshadow; Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; if (req[0] == kWriteTo) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s); Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s); Copy(out, in, s); }); } else if (req[0] != kWriteInplace) { LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace"; } mxnet::TShape vshape = data.shape_; const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step); if (zero_size_shape) { return; // slice_assign of zero-sized subspaced needs no operation. 
} for (index_t i = 0; i < param.begin.ndim(); ++i) { const int b = begin[i], e = end[i], s = step[i]; SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape); } MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0], out.dptr<DType>(), static_cast<DType>(param.scalar), req[0], out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step); }) }) } struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> { int axis; index_t begin; dmlc::optional<index_t> end; DMLC_DECLARE_PARAMETER(SliceAxisParam) { DMLC_DECLARE_FIELD(axis) .describe("Axis along which to be sliced, supports negative indexes."); DMLC_DECLARE_FIELD(begin) .describe("The beginning index along the axis to be sliced, " " supports negative indexes."); DMLC_DECLARE_FIELD(end) .describe("The ending index along the axis to be sliced, " " supports negative indexes."); } }; inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape, int* axis, index_t* begin, index_t* end) { *axis = param.axis; if (*axis < 0) { *axis += ishape.ndim(); } CHECK(*axis < ishape.ndim() && *axis >= 0) << "Transformed axis must be smaller than the source ndim and larger than zero! 
Recieved axis=" << param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis; index_t axis_size = static_cast<index_t>(ishape[*axis]); *begin = param.begin; *end = -1; if (*begin < 0) { *begin += axis_size; } if (axis_size > 0) { if (!static_cast<bool>(param.end)) { *end = axis_size; } else { *end = param.end.value(); if (*end < 0) { *end += axis_size; } } CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size; CHECK((*begin < *end)) << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end; } else { *begin = 0; *end = 0; } CHECK(*end >= 0) << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end; CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin; } inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& ishape = (*in_attrs)[0]; if (!mxnet::ndim_is_known(ishape)) return false; int axis; index_t begin, end; GetSliceAxisParams(param, ishape, &axis, &begin, &end); if (!mxnet::dim_size_is_known(ishape, axis)) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape); return false; } mxnet::TShape shape(ishape.ndim(), -1); for (int i = 0; i < ishape.ndim(); ++i) { if (i == axis) { shape[i] = static_cast<index_t>(end - begin); } else { shape[i] = ishape[i]; } } SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); return shape_is_known(shape); } template<typename xpu> void SliceAxis(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow::expr; const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); int axis; index_t begin, end; GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, 
&end); int ndim = outputs[0].ndim(); if (axis + 1 == ndim) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 2, DType> in = inputs[0].FlatTo2D<xpu, DType>(s); mshadow::Tensor<xpu, 2, DType> out = outputs[0].FlatTo2D<xpu, DType>(s); ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end)); }); } else { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 3, DType> in = inputs[0].FlatTo3D<xpu, DType>(axis, s); mshadow::Tensor<xpu, 3, DType> out = outputs[0].FlatTo3D<xpu, DType>(axis, s); ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end)); }); } } // Backward pass of broadcast over the given axis template<typename xpu> void SliceAxisGrad_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { if (outputs[0].shape_.Size() == 0) { return; } const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed); using namespace mshadow::op; using namespace mshadow::expr; mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); int axis; index_t begin, end; GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end); int ndim = outputs[0].shape_.ndim(); if (axis + 1 == ndim) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 2, DType> ograd = inputs[0].FlatTo2D<xpu, DType>(s); mshadow::Tensor<xpu, 2, DType> igrad = outputs[0].FlatTo2D<xpu, DType>(s); if (req[0] == kAddTo) { slice<1>(igrad, begin, end) += F<identity>(ograd); } else if (req[0] == kWriteTo) { igrad = 0.0f; slice<1>(igrad, begin, end) = F<identity>(ograd); } else { CHECK_EQ(req[0], kNullOp); } }); } else { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mshadow::Tensor<xpu, 3, DType> ograd = inputs[0].FlatTo3D<xpu, DType>(axis, s); mshadow::Tensor<xpu, 3, DType> igrad = outputs[0].FlatTo3D<xpu, DType>(axis, s); if (req[0] == kAddTo) { slice<1>(igrad, begin, end) += F<identity>(ograd); } else if (req[0] == kWriteTo) { igrad = 0.0f; 
slice<1>(igrad, begin, end) = F<identity>(ograd); } else { CHECK_EQ(req[0], kNullOp); } }); } } struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> { mxnet::Tuple<int> axes; DMLC_DECLARE_PARAMETER(SliceLikeParam) { DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>()) .describe("List of axes on which input data will be sliced according to the " "corresponding size of the second input. By default will slice on " "all axes. Negative axes are supported."); } }; inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 2U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape& ishape = (*in_attrs)[0]; mxnet::TShape& from_shape = (*in_attrs)[1]; if (param.axes.ndim() == 0) { CHECK_EQ(ishape.ndim(), from_shape.ndim()) << "By default slice_axis performs slice on all axes, but ndim mismatch " "for inputs: " << ishape.ndim() << " vs. 
" << from_shape.ndim(); for (int i = 0; i < ishape.ndim(); ++i) { CHECK_GE(ishape[i], from_shape[i]) << "Slice axis " << i << " with size " << from_shape[i] << "exceeds limit of input with size " << ishape[i]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape); } else { mxnet::TShape shape(ishape); for (int i = 0; i < param.axes.ndim(); ++i) { int axis = param.axes[i]; if (axis < 0) { axis += ishape.ndim(); } CHECK_GE(axis, 0) << "Slice axis: " << param.axes[i] << " too small"; CHECK_GT(ishape.ndim(), axis) << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim(); CHECK_GT(from_shape.ndim(), axis) << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim(); shape[axis] = from_shape[axis]; CHECK_GE(ishape[axis], from_shape[axis]) << "Slice axis " << axis << " with size " << from_shape[axis] << "exceeds limit of input with size " << ishape[axis]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape); } return true; } inline void SliceLikeInferRanges(const mxnet::TShape& dshape, const mxnet::TShape& fshape, const mxnet::Tuple<int>& axes, mxnet::Tuple<dmlc::optional<index_t>>* param_begin, mxnet::Tuple<dmlc::optional<index_t>>* param_end, mxnet::Tuple<dmlc::optional<index_t>>* param_step) { std::vector<dmlc::optional<index_t>> pb(dshape.ndim()); std::vector<dmlc::optional<index_t>> pe(dshape.ndim()); std::vector<dmlc::optional<index_t>> ps(dshape.ndim()); if (axes.ndim() == 0) { for (int i = 0; i < dshape.ndim(); ++i) { pb[i] = 0; pe[i] = fshape[i]; ps[i] = 1; } } else { for (int i = 0; i < axes.ndim(); ++i) { int axis = axes[i]; if (axis < 0) { axis += dshape.ndim(); } CHECK_GE(axis, 0) << "Slice axis: " << axes[i] << " too small"; CHECK_LT(axis, dshape.ndim()) << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim(); CHECK_LT(axis, fshape.ndim()) << "Slice axis: " << axis << " exceeds first input: " << fshape.ndim(); pb[axis] = 0; pe[axis] = fshape[axis]; ps[axis] = 1; } } *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), 
pb.end()); *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end()); *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end()); } template<typename xpu> void SliceLikeForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); using namespace mshadow::expr; const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob& data = inputs[0]; const TBlob& out = outputs[0]; const mxnet::TShape& ishape = data.shape_; const mxnet::TShape& from_shape = inputs[1].shape_; mxnet::Tuple<dmlc::optional<index_t>> param_begin; mxnet::Tuple<dmlc::optional<index_t>> param_end; mxnet::Tuple<dmlc::optional<index_t>> param_step; SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step); MXNET_NDIM_SWITCH(data.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(out.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = out.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= out.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads, out.dptr<DType>(), data.dptr<DType>(), data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step); }) }) }) } template<typename xpu> void SliceLikeBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U); CHECK_EQ(req.size(), 2U); using namespace mshadow; Stream<xpu>* s = ctx.get_stream<xpu>(); if (req[1] != kNullOp && req[1] != kAddTo) { Fill(s, 
outputs[1], req[1], 0); // Second input not relavant to gradients. } if (req[0] == kNullOp) return; const TBlob& ograd = inputs[0]; const TBlob& igrad = outputs[0]; const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed); if (req[0] == kWriteTo) { Fill(s, igrad, req[0], 0); } else if (req[0] == kWriteInplace) { LOG(FATAL) << "_slice_like_backward does not support kWriteInplace"; } const mxnet::TShape& ishape = ograd.shape_; const mxnet::TShape& from_shape = outputs[1].shape_; mxnet::Tuple<dmlc::optional<index_t>> param_begin; mxnet::Tuple<dmlc::optional<index_t>> param_end; mxnet::Tuple<dmlc::optional<index_t>> param_step; SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step); MXNET_NDIM_SWITCH(ograd.ndim(), ndim, { common::StaticArray<index_t, ndim> begin, end, step; GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step); MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { int num_threads = ograd.shape_.FlatTo2D()[0]; if (std::is_same<xpu, gpu>::value) { num_threads *= ograd.shape_.get<ndim>()[ndim - 1]; } mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads, igrad.dptr<DType>(), ograd.dptr<DType>(), igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step); }) }) }) } struct ClipParam : public dmlc::Parameter<ClipParam> { real_t a_min, a_max; DMLC_DECLARE_PARAMETER(ClipParam) { DMLC_DECLARE_FIELD(a_min) .describe("Minimum value"); DMLC_DECLARE_FIELD(a_max) .describe("Maximum value"); } }; struct clip { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas, const float a_min, const float a_max) { DType data = datas[i]; if (data > a_max) { out[i] = a_max; } else if (data < a_min) { out[i] = a_min; } else { out[i] = data; } } }; struct clip_grad { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas, const float 
a_min, const float a_max) { DType data = datas[i]; if (data > a_max) { out[i] = 0; } else if (data < a_min) { out[i] = 0; } else { out[i] = grad[i]; } } }; template<typename xpu> void Clip(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), param.a_min, param.a_max); }); } template<typename xpu> void ClipEx(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { CHECK_EQ(inputs[0].dtype(), outputs[0].dtype()); CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type()); CHECK_NE(inputs[0].storage_type(), kDefaultStorage); UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>); } template<typename xpu> void ClipGrad_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed); CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_); Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max); }); } /*! * \brief The parameters of the repeat operator include * the number of repeating time and axis (optional). 
* The parameters will be later used to deduce the * output ndarray shape in bool RepeatShape() function. */ struct RepeatParam : public dmlc::Parameter<RepeatParam> { int repeats = 1; dmlc::optional<int> axis; DMLC_DECLARE_PARAMETER(RepeatParam) { DMLC_DECLARE_FIELD(repeats) .describe("The number of repetitions for each element."); DMLC_DECLARE_FIELD(axis) .set_default(dmlc::optional<int>()) .describe("The axis along which to repeat values." " The negative numbers are interpreted counting from the backward." " By default, use the flattened input array," " and return a flat output array."); } }; /*! * \brief Helper function for getting user input params for the operator repeat. * Sanity check the user input values. */ inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape, int* repeats, dmlc::optional<int>* axisOpt) { *repeats = param.repeats; CHECK_GE(*repeats, 0) << "repeats cannot be a negative number"; *axisOpt = param.axis; if (static_cast<bool>(*axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt->value(); if (axis < 0) { axis += ndims; } CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds"; } } inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const mxnet::TShape& ishape = (*in_attrs)[0]; int repeats = 0; dmlc::optional<int> axisOpt; GetRepeatParams(param, ishape, &repeats, &axisOpt); // If 0 repeats, return an empty 1-dim, 0-size array if (0 == repeats) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0)); return true; } // If repeats > 0, multiply the size of the corresponding axis by repeats if (static_cast<bool>(axisOpt)) { int ndims = ishape.ndim(); int axis = axisOpt.value(); if (axis < 0) { axis += ndims; } mxnet::TShape shape(ishape.ndim(), -1); for (int i = 0; i < ishape.ndim(); ++i) { 
if (i == axis) {
      shape[i] = repeats * ishape[i];
    } else {
      shape[i] = ishape[i];
    }
  }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  } else {  // If axis is not input by user, return a flat 1D array of size = in.size*repeats
    mxnet::TShape shape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return shape_is_known(out_attrs->at(0));
}

// Infer the repeat op's dtype: propagate a known dtype in either direction
// (input -> output, or output -> input when only the output is known).
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  if ((*in_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
  } else if ((*out_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}

/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;  // normalize a negative axis
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim()+1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    // dims before and including `axis` are copied unchanged
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    // the inserted dim: size 1 in the input, `repeats` in the broadcast target
    rshape[i] = 1;
    bshape[i] = repeats;
    // remaining dims shift right by one
    while (i < ishape.ndim()) {
      rshape[i+1] = ishape[i];
      bshape[i+1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {  // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] =
repeats; return std::make_pair(rshape, bshape); } } template<typename xpu> void RepeatOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TBlob& iTBlob = inputs[0]; const mxnet::TShape& ishape = iTBlob.shape_; if (!shape_is_known(ishape)) return; int repeats = 0; dmlc::optional<int> axisOpt; const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); GetRepeatParams(param, ishape, &repeats, &axisOpt); if (0 == repeats) return; std::pair<mxnet::TShape, mxnet::TShape> rshapes = \ ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
* \param inputs the gradient of the loss function * with respect to the outputs of the operator * \param outputs the gradient of the loss function * with respect to the inputs of the operator */ template<typename xpu> void RepeatOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); const mxnet::TShape& oshape = outputs[0].shape_; if (!shape_is_known(oshape)) return; int repeats = 0; dmlc::optional<int> axisOpt; const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed); GetRepeatParams(param, oshape, &repeats, &axisOpt); if (0 == repeats) return; std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats); // reshaped output grad tblob TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; // reshaped input grad tblob TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>( ctx, newInputs, req, newOutputs, rshapes.first); } struct TileParam : public dmlc::Parameter<TileParam> { mxnet::Tuple<int> reps; DMLC_DECLARE_PARAMETER(TileParam) { DMLC_DECLARE_FIELD(reps) .describe("The number of times for repeating the tensor a. Each dim size of reps" " must be a positive integer." " If reps has length d, the result will have dimension of max(d, a.ndim);" " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes." 
" If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it."); } }; inline bool TileOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); const TileParam& param = nnvm::get<TileParam>(attrs.parsed); const mxnet::TShape& ishape = (*in_attrs)[0]; if (!shape_is_known(ishape)) { return false; } const mxnet::Tuple<int>& reps = param.reps; // If reps is empty, return a identical input array if (reps.ndim() == 0) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape); return true; } mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = oshape.ndim() - 1; i >= 0; --i) { if (i1 >= 0 && i2 >= 0) { oshape[i] = ishape[i1--] * reps[i2--]; } else if (i1 >= 0) { oshape[i] = ishape[i1--]; } else if (i2 >= 0) { oshape[i] = reps[i2--]; } } // If reps contains 0s, oshape is a zero-size shape. // Need to distinguish between np_shape mode and legacy mode. if (!Imperative::Get()->is_np_shape()) { common::ConvertToNumpyShape(&oshape); } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); return shape_is_known(oshape); } inline bool TileOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if ((*in_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]); } else if ((*out_attrs)[0] != -1) { TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]); } return true; } /*! * \brief Reshape the input and output tensors for * using broadcast_to to achieve the functionality * of operator tile. * \return a pair of mxnet::TShape's, first is the reshaped * input shape, second is the reshaped output shape. 
*/ inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp( const mxnet::TShape& ishape, const mxnet::Tuple<int>& reps) { if (reps.ndim() == 0) { return std::make_pair(ishape, ishape); } // The shape we want to broadcast to mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1); // The shape of the input tensor after adding new axes before each dim mxnet::TShape rshape(bshape.ndim(), 1); int i1 = ishape.ndim() - 1; int i2 = reps.ndim() - 1; for (int i = bshape.ndim() - 1; i >= 0; --i) { if (0 == (i & 1)) { bshape[i] = (i2 >= 0? reps[i2--] : 1); rshape[i] = 1; } else { rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1); } } return std::make_pair(rshape, bshape); } /*! * \brief Implementation of tiling the input tensor a based * on the user-input shape, reps. * If a.ndim < reps.ndim, new axes are pre-pended to a. For example, * the input tensor has shape (3,), and the reps is (2, 4); the input * tensor would be reshaped to (1, 3). * If a.ndim > reps.ndim, pre-pending 1's to reps. For example, * the input tensor has shape (2, 3, 4, 5), and reps is (2, 2); * the reps would be changed to (1, 1, 2, 2). * Suppose we have a.ndim = reps.ndim now. To achieve tiling, * we utilize the operator broadcast_to. For example, for a tensor * of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape * the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding * one axis before each dimension. Then, we want to broadcast * the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final * output tensor would have shape (2*2, 8*3, 9*4, 3*5). 
*/ template<typename xpu> void TileOpForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); if (inputs[0].Size() == 0) return; const mxnet::TShape& ishape = inputs[0].shape_; const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps; // If any one of the number in reps is zero, return immediately for (int i = 0; i < reps.ndim(); ++i) { if (0 == reps[i]) return; } std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps); // reshaped input tblob TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(), inputs[0].type_flag_, inputs[0].dev_id()); std::vector<TBlob> newInputs = {iblob}; // reshaped output tblob TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(), outputs[0].type_flag_, outputs[0].dev_id()); std::vector<TBlob> newOutputs = {oblob}; BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs); } /*! * \brief Compute the gradient of the loss function * with respect to the input of the operator. * Backpropagation is employed to implement the * chain rule. 
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);

  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;

  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }

  // Same interleaved shapes as the forward pass, computed from the
  // *input-grad* shape (the forward op's input shape).
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);

  // reshaped output grad tblob
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};

  // Gradient of tile = sum over the repetition axes, i.e. the reduction
  // inverse of the broadcast used in the forward pass.
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}

// Parameter of the `reverse` operator: the set of axes to flip.
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  mxnet::Tuple<int> axis;
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("The axis which to reverse elements.");
  }
};

#define REVERSE_MAX_DIM 10U

struct reverse {
  // Maps an output linear index to the source linear index obtained by
  // reversing the requested dimensions. stride_[i] is the length of the
  // i-th reversed dim; trailing_[i] is the product of all dims after it.
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      // Flip coordinate x -> stride-1-x within this dimension.
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage the per-axis tables in shared memory; the first REVERSE_MAX_DIM
    // threads of each block cooperate, then everyone synchronizes.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};

template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu> *s = ctx.get_stream<xpu>();

  const mxnet::TShape& ishape = inputs[0].shape_;

  // Per reversed axis: its length (stride_) and the number of elements after
  // it in row-major order (trailing_).
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  for (int axis : param.axis) {
    // NOTE(review): only an upper bound is checked; a negative axis would
    // index ishape out of range — presumably callers normalize/validate
    // axis values beforehand. TODO confirm.
    CHECK_LT(axis, ishape.ndim());
    stride_[reverse_index] = ishape[axis];
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }

#ifdef __CUDACC__
  // On GPU the stride/trailing tables must live in device memory; copy them
  // asynchronously on the compute stream.
  mshadow::Tensor<xpu, 1, uint8_t> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
      mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);

  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);

  cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif

#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
                                 inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
                                 reinterpret_cast<index_t*>(stride_workspace),
                                 reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
                                 inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
                                 stride_.data(), trailing_.data());
  });
#endif
}

// Parameters of the `stack` operator.
struct StackParam : public dmlc::Parameter<StackParam> {
  int axis;
  int num_args;
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(0)
    .describe("The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
    .describe("Number of inputs to be stacked.");
  }
};

// Infers the stacked output shape: all inputs must agree on a common shape,
// and the output gets an extra axis of length num_args inserted at `axis`.
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);

  mxnet::TShape dshape;
  // Unify all input shapes into dshape.
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;

  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  for (index_t i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i-1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);

  return shape_is_known(oshape);
}

template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const
OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, outputs[0].ndim());

  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
    Tensor<xpu, 3, DType> out;
    // Collapse the output to 3D (leading, stack axis, trailing) so the
    // generic Concatenate routine can be reused along the middle axis.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= outputs[0].shape_[i];
    }
    for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
      trailing *= outputs[0].shape_[i];
    }
    size_t mid = outputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);

    // Each input is viewed as a length-1 slab along the stack axis.
    for (size_t i = 0; i < inputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Concatenate(data, &out, 1, req[0]);
  })
}

// Backward of stack: split the output gradient back into one slab per input.
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());

  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
    Tensor<xpu, 3, DType> grad;
    // Same 3D collapse as the forward pass, driven by the out-grad shape.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= inputs[0].shape_[i];
    }
    for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
      trailing *= inputs[0].shape_[i];
    }
    size_t mid = inputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);

    for (size_t i = 0; i < outputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Split(grad, &grad_in, 1, req);
  })
}

// Parameter of the `squeeze` operator: the optional subset of axes to drop.
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
  dmlc::optional<mxnet::Tuple<int>> axis;
  DMLC_DECLARE_PARAMETER(SqueezeParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(dmlc::optional<mxnet::Tuple<int>>())
    .describe("Selects a subset of the single-dimensional entries in the shape."
              " If an axis is selected with shape entry greater than one, an error is raised.");
  }
};

// Given a shape whose squeezed-out dims have been marked with -1,
// move all the -1 markers to the end of the shape array
// and keep the relative order of the remaining values.
// Returns the number of dims left after dropping the markers.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  size_t count = 0;  // number of -1 markers seen so far
  for (int i = 0; i < shape->ndim(); ++i) {
    if ((*shape)[i] == -1) {
      ++count;
    } else {
      // Shift each kept dim left past the markers encountered before it.
      std::swap((*shape)[i], (*shape)[i-count]);
    }
  }
  return shape->ndim() - count;
}

// Shape inference for `squeeze`: validates the requested axes (size must be
// exactly 1, no duplicates) and produces the input shape with those axes
// removed; with no axes given, all size-1 dims are removed.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  const int dndim = dshape.ndim();
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape = dshape;
  if (param.axis.has_value()) {
    // preprocess axis: normalize negatives, validate, and mark with -1
    mxnet::Tuple<int> axes = param.axis.value();
    for (int i = 0; i < axes.ndim(); ++i) {
      if (axes[i] < 0) {
        axes[i] += dndim;
        CHECK_GE(axes[i], 0)
          << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
      }
      CHECK_LT(axes[i], dndim)
        << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
      CHECK_EQ(dshape[axes[i]], 1)
        << "cannot select an axis to squeeze out which has size="
        << dshape[axes[i]] << " not equal to one";
      CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
      oshape[axes[i]] = -1;
    }
  } else {
    for (int
i = 0; i < oshape.ndim(); ++i) { if (oshape[i] == 1) oshape[i] = -1; } } size_t oshape_size = SqueezeShapeHelper(&oshape); if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1) oshape[0] = 1; oshape_size = 1; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size)); return true; } struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> { int block_size; DMLC_DECLARE_PARAMETER(DepthToSpaceParam) { DMLC_DECLARE_FIELD(block_size) .describe("Blocks of [block_size. block_size] are moved"); } }; inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor"; mxnet::TShape expected_out(4, -1); mxnet::TShape& in_shape = in_attrs->at(0); int block = param.block_size; CHECK_NE(block, 0) << "block_size must be a positive integer value"; CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0"; CHECK_EQ(in_shape[1] % (block * block), 0) << "Cannot perform Depth To Space operation on the specified tensor." " Dimension:1(depth dimension) should be a multiple of 'block^2'"; CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0"; CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0"; CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. 
Size of dimension:3 cannot be 0"; expected_out[0] = in_shape[0]; expected_out[1] = in_shape[1] / (block * block); int i = 2; while (i < expected_out.ndim()) { expected_out[i] = in_shape[i] * block; ++i; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out); return shape_is_known(expected_out); } inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } /*! * \brief This function updates the value of input index from where the data element * needs to be fetched and written out to the ith location in output tensor * \param index_position index within offset array to get offset of given dimension * \param dim_size size of current dimension * \param idx output tensor index * \param inp_index index within input tensor from where value is retrieved * \param offset_arr array containing the linear offset of input tensor */ MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx, index_t *inp_index, const index_t* offset_arr) { index_t next_idx_val = *idx / dim_size; *inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position]; *idx = next_idx_val; } /*! 
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i           tensor index
 * \param out_data    output tensor
 * \param in_data     input tensor
 * \param block       size of chunks to be moved out of depth dimension
 * \param size        array containing the size of each dimension of input tensor
 * \param offset_arr  array containing the linear offset of input tensor
 */
template<int req>
struct depth_to_space_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
                                  const int block, const index_t* size,
                                  const index_t* offset_arr) {
    // Decode output index i digit by digit, fastest-varying dim first,
    // accumulating the matching input offset via offset_arr.
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[3];
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2];
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1] / (block * block);
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};

/*!
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing depth_to_space operation
 * \param i           global thread id
 * \param offset_arr  array to be populated with offset values
 * \param size        array to be populated with size of each dimension of input tensor
 * \param block       size of chunks to be moved out of depth dimension
 * \param size0       size of Dim 0 of input tensor
 * \param size1       size of Dim 1 of input tensor
 * \param size2       size of Dim 2 of input tensor
 * \param size3       size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_depth_to_space {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;

    // Row-major strides of the virtual 6D view
    // (N, block, block, C/block^2, H, W), innermost last.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};

template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;

  // Workspace holds 10 index_t values: 6 strides followed by 4 dim sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size =
reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6); MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch( s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1], in_data.shape_[2], in_data.shape_[3]); Kernel<depth_to_space_forward<req_type>, xpu>::Launch( s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(), block, size, offset_arr); }); }); } inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor"; mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1); mxnet::TShape& in_shape = in_attrs->at(0); int block = param.block_size; CHECK_NE(block, 0) << "block_size must be a positive integer value"; CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0"; CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0"; CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0"; CHECK_EQ(in_shape[2] % block, 0) << "Cannot perform Depth To Space operation on the specified tensor." " Dimension:2(1st Space dimension) should be a multiple of 'block' "; CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0"; CHECK_EQ(in_shape[3] % block, 0) << "Cannot perform Depth To Space operation on the specified tensor." 
" Dimension:3(2nd space dimension) should be a multiple of 'block' "; expected_out[0] = in_shape[0]; expected_out[1] = in_shape[1] * block * block; int i = 2; while (i < expected_out.ndim()) { expected_out[i] = in_shape[i] / block; ++i; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out); return shape_is_known(expected_out); } inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } /*! * \brief This function preforms the tensor transpose (0, 1, 2, 3, 4, 5) -> * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped * to the ith index of output tensor * \param i tensor index * \param out_data output tensor * \param in_data input tensor * \param block size of chunks to be moved out of depth dimension * \param size array containing the size of each dimension of input tensor * \param offset_arr array containing the linear offset of input tensor */ template<int req> struct space_to_depth_forward { template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block, const index_t* size, const index_t* offset_arr) { index_t inp_index = 0, idx = i, dim_size; dim_size = size[3] / block; update_index(4, dim_size, &idx, &inp_index, offset_arr); dim_size = size[2] / block; update_index(2, dim_size, &idx, &inp_index, offset_arr); dim_size = size[1]; update_index(1, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(5, dim_size, &idx, &inp_index, offset_arr); dim_size = block; update_index(3, dim_size, &idx, &inp_index, offset_arr); dim_size = size[0]; update_index(0, dim_size, &idx, &inp_index, offset_arr); KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]); } }; /*! 
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing space_to_depth operation
 * \param i           global thread id
 * \param offset_arr  array to be populated with offset values
 * \param size        array to be populated with size of each dimension of input tensor
 * \param block       size of chunks to be moved out of depth dimension
 * \param size0       size of Dim 0 of input tensor
 * \param size1       size of Dim 1 of input tensor
 * \param size2       size of Dim 2 of input tensor
 * \param size3       size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size,
                                  const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;

    // Row-major strides of the virtual 6D view
    // (N, C, H/block, block, W/block, block), innermost last.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};

template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;

  // Workspace holds 10 index_t values: 6 strides followed by 4 dim sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size =
reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);

  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // First kernel populates the stride/size tables in the workspace;
      // second kernel performs the actual element permutation.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}

namespace split_enum {
enum SplitOpInputs {kData};
}  // namespace split_enum

// Parameters of the split / split_v2 operators.
struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;
  int axis;
  bool squeeze_axis;
  int sections;
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries of at which split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
};  // struct SplitParam

// Builds the sections+1 split boundaries [0, k, 2k, ...] for an equal split.
// NOTE(review): assumes ishape[axis] is divisible by sections — any remainder
// is silently dropped here; presumably validated by the caller. TODO confirm.
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  mxnet::TShape indices(sections+1, -1);
  indices[0] = 0;
  int64_t section_size = ishape[axis] / sections;
  for (int i = 0; i < sections; ++i) {
    indices[i+1] = section_size * (i + 1);
  }
  return indices;
}

// Type inference for split: every output inherits the input dtype.
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  out_attrs->clear();
  int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  for (int i = 0; i < num_outputs; ++i) {
    out_attrs->push_back(dtype);
  }
  return true;
}

// Shape inference core for split, given an already-normalized axis.
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  const mxnet::TShape indices = (param.sections > 0) ?
    GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  // With sections, indices has a leading 0 entry; with explicit indices the
  // boundaries themselves are the outputs.
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage
  mxnet::TShape squeezed_dshape = dshape;
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d+1];
  }
  squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    int start = indices[i];
    int end = (i < num_outputs - 1) ?
indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      // Zero-size axis: every section is empty.
      end = start;
    } else {
      CHECK(start <= end)
        << "start " << start << " is not less than end " << end << "for subarray " << i;
      CHECK(end <= ishape[real_axis])
        << "end " << end << " is no less than the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      CHECK_EQ(end - start, 1U)
        << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Back-propagate shape information from the outputs to the input.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    // Split axis length is the sum of the output sections.
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}

// Shape inference entry point for split: validates and normalizes the axis,
// then delegates to SplitOpShapeImpl.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    CHECK_LT(param.axis + dshape.ndim(), dshape.ndim());
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}

struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator
   * \param i              global thread id
   * \param in_data        ptr to input buffer
   * \param out_data       ptr to ptr of outputs buffer
   * \param indices        ptr to indices buffer
   * \param num_sections   # of sections after split
   * \param axis_size      size of axis to be splitted on
   * \param trailing_size  step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data,
                                  const size_t* indices,
                                  const size_t num_sections,
                                  const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    size_t target = 0;
    // Linear scan: pick the last section whose boundary is <= idx.
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         target = section++) {}
    DType* target_data = out_data[target];
    const size_t mid_idx = idx - indices[target];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[target + 1] - indices[target];
    // Re-linearize (head, mid, tail) inside the chosen output section.
    const size_t target_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};

struct ConcatenateKernel {
  /*!
   * \brief Map function for backward split_v2 operator
   * \param i              global thread id
   * \param out_grad       ptr to ptr of out grads buffer
   * \param in_grad        ptr to input grad buffer
   * \param indices        ptr to indices buffer
   * \param num_sections   # of sections after split
   * \param axis_size      size of axis to be splitted on
   * \param trailing_size  step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad,
                                  const size_t* indices,
                                  const size_t num_sections,
                                  const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis (inverse of SplitKernel).
    size_t idx = i / trailing_size % axis_size;
    size_t src = 0;
    // Linear scan: pick the last section whose boundary is <= idx.
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         src = section++) {}
    DType* src_grad = out_grad[src];
    const size_t mid_idx = idx - indices[src];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[src + 1] - indices[src];
    // Re-linearize (head, mid, tail) inside the source gradient section.
    const size_t src_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};

// Forward of split_v2, given an already-normalized axis: stages the section
// boundaries and the per-output data pointers into a device-visible
// workspace, then scatters the input with SplitKernel.
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  // Collapse to (leading, split axis, trailing).
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_data.ndim());
  size_t mid = input_data.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_data.shape_[i];
  }
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }

  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
(param.sections > 0) ?
      GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Explicit indices lack the final boundary; append the axis length so the
    // kernel can compute every section's size as indices[t+1] - indices[t].
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    // Single workspace: boundaries first, output pointers after.
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(
        s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}

// Forward entry point for split_v2: normalizes the axis and delegates.
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), (param.sections > 0) ?
param.sections : param.indices.ndim());
  const TBlob& input_data = inputs[split_enum::kData];
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += input_data.ndim();
  }
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}

// Backward of split_v2, given an already-normalized axis: gathers the output
// gradients back into the single input gradient with ConcatenateKernel.
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  // Collapse to (leading, split axis, trailing), mirroring the forward pass.
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_grad.ndim());
  size_t mid = input_grad.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_grad.shape_[i];
  }
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }

  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts = (param.sections > 0) ?
GetSplitIndices(ishape, real_axis, param.sections) : param.indices; std::vector<size_t> indices; for (const auto& section : split_pts) { indices.push_back(section); } if (param.sections == 0) { indices.push_back(ishape[real_axis]); } workspace_size += indices.size() * sizeof(size_t); MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, { std::vector<DType*> out_grads; for (const TBlob& output_grad : inputs) { out_grads.push_back(output_grad.dptr<DType>()); } workspace_size += out_grads.size() * sizeof(DType*); Tensor<xpu, 1, char> workspace = ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s); Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size())); Tensor<xpu, 1, size_t> indices_xpu_tensor( reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size())); Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size())); Tensor<xpu, 1, DType*> ptrs_xpu_tensor( reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)), Shape1(inputs.size())); mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s); mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s); Kernel<ConcatenateKernel, xpu>::Launch( s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(), indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing); }); } template<typename xpu> inline void SplitOpBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); CHECK_EQ(inputs.size(), (param.sections > 0) ? 
param.sections : param.indices.ndim()) << "out grad vector size mush match the output size"; CHECK_EQ(outputs.size(), 1U); int real_axis = param.axis; if (real_axis < 0) { real_axis += outputs[split_enum::kData].ndim(); } SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis); } inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) { const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed); return (param.sections > 0) ? param.sections : param.indices.ndim(); } } // namespace op } // namespace mxnet namespace std { template<> struct hash<mxnet::op::TransposeParam> { size_t operator()(const mxnet::op::TransposeParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axes); return ret; } }; template<> struct hash<mxnet::op::ReshapeParam> { size_t operator()(const mxnet::op::ReshapeParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.target_shape); ret = dmlc::HashCombine(ret, val.keep_highest); ret = dmlc::HashCombine(ret, val.shape); ret = dmlc::HashCombine(ret, val.reverse); return ret; } }; template<> struct hash<mxnet::op::ExpandDimParam> { size_t operator()(const mxnet::op::ExpandDimParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); return ret; } }; } // namespace std #endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
3.c
/* The Computer Language Benchmarks Game
 * https://salsa.debian.org/benchmarksgame-team/benchmarksgame/
 *
 * Contributed by Mr Ledrug
 *
 * Algorithm lifted from Intel Fortran #2 code by Steve Decker et al.
 */

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Denominator of entry (i,j) of the implicit matrix: the matrix entries are
 * 1/A(i,j).  `static inline` (instead of bare C99 `inline`) guarantees this
 * translation unit emits a definition, avoiding a link failure when a call
 * is not inlined. */
static inline int A(int i, int j) {
  return ((i + j) * (i + j + 1) / 2 + i + 1);
}

/* Dot product of the length-n vectors v and u. */
double dot(double *v, double *u, int n) {
  int i;
  double sum = 0;
  for (i = 0; i < n; i++)
    sum += v[i] * u[i];
  return sum;
}

/* out[i] = sum_j v[j] / A(i,j) — multiply v by the implicit matrix,
 * parallelized over rows. */
void mult_Av(double *v, double *out, const int n) {
  int i, j;
  double sum;
#pragma omp parallel for private(sum, j)
  for (i = 0; i < n; i++) {
    for (sum = j = 0; j < n; j++)
      sum += v[j] / A(i, j);
    out[i] = sum;
  }
}

/* out[i] = sum_j v[j] / A(j,i) — multiply v by the transposed matrix,
 * parallelized over rows. */
void mult_Atv(double *v, double *out, const int n) {
  int i, j;
  double sum;
#pragma omp parallel for private(sum, j)
  for (i = 0; i < n; i++) {
    for (sum = j = 0; j < n; j++)
      sum += v[j] / A(j, i);
    out[i] = sum;
  }
}

/* Length-n scratch vector used by mult_AtAv; allocated in main. */
double *tmp;

/* out = A^T (A v), via the shared scratch vector tmp. */
void mult_AtAv(double *v, double *out, const int n) {
  mult_Av(v, tmp, n);
  mult_Atv(tmp, out, n);
}

int main(int argc, char **argv) {
  /* Guard against a missing argument: the original dereferenced argv[1]
   * unconditionally, which is undefined behavior when run with no args.
   * Fall back to the reference problem size of 2000. */
  int n = (argc > 1) ? atoi(argv[1]) : 0;
  if (n <= 0)
    n = 2000;

  double *u, *v;
  u = malloc(n * sizeof(double));
  v = malloc(n * sizeof(double));
  tmp = malloc(n * sizeof(double));
  if (!u || !v || !tmp) {
    fprintf(stderr, "out of memory\n");
    return 1;
  }

  int i;
  for (i = 0; i < n; i++)
    u[i] = 1;
  /* Ten rounds of u <- (A^T A)^2 u; the ratio below then approximates the
   * largest eigenvalue of A^T A, whose square root is printed. */
  for (i = 0; i < 10; i++) {
    mult_AtAv(u, v, n);
    mult_AtAv(v, u, n);
  }

  printf("%.9f\n", sqrt(dot(u, v, n) / dot(v, v, n)));

  /* Release the work vectors (the original leaked them). */
  free(u);
  free(v);
  free(tmp);
  return 0;
}
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

// Forward declaration: holds the cache-blocking sizes (mc, nc, kc) and the
// packing buffers shared by the GEMM kernels below.
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    // (lhs/rhs and rows/cols are swapped, and each operand's storage order
    // is flipped, delegating to the ColMajor specialization below).
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/*  Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static void run(Index rows, Index cols, Index depth,
                  const LhsScalar* _lhs, Index lhsStride,
                  const RhsScalar* _rhs, Index rhsStride,
                  ResScalar* _res, Index resStride,
                  ResScalar alpha,
                  level3_blocking<LhsScalar,RhsScalar>& blocking,
                  GemmParallelInfo<Index>* info = 0)
  {
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      LhsScalar* blockA = blocking.blockA();
      eigen_internal_assert(blockA!=0);

      std::size_t sizeB = kc*nc;
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing B'.
        pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

        // Pack A_k to A' in a parallel fashion:
        // each thread packs the sub block A_k,i to A'_i where i is the thread id.

        // However, before copying to A'_i, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

        // Notify the other threads that the part A'_i is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per A'_i
        for(Index shift=0; shift<threads; ++shift)
        {
          // Start from this thread's own panel so threads work on distinct
          // panels first, then rotate through the others.
          Index i = (tid+shift)%threads;

          // At this point we have to make sure that A'_i has been updated by the thread i,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if (shift>0) {
            while(info[i].sync!=k) {
            }
          }

          gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
        }

        // Then keep going as usual with the remaining B'
        for(Index j=nc; j<cols; j+=nc)
        {
          const Index actual_nc = (std::min)(j+nc,cols)-j;

          // pack B_k,j to B'
          pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

          // C_j += A' * B'
          gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
        }

        // Release all the sub blocks A'_i of A' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index i=0; i<threads; ++i)
#pragma omp atomic
          info[i].users -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*nc;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

      const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        for(Index k2=0; k2<depth; k2+=kc)
        {
          const Index actual_kc = (std::min)(k2+kc,depth)-k2;

          // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
          // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
          // Note that this panel will be read as many times as the number of blocks in the rhs's
          // horizontal panel which is, in practice, a very low number.
          pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

          // For each kc x nc block of the rhs's horizontal panel...
          for(Index j2=0; j2<cols; j2+=nc)
          {
            const Index actual_nc = (std::min)(j2+nc,cols)-j2;

            // We pack the rhs's block into a sequential chunk of memory (L2 caching)
            // Note that this block will be read a very high number of times, which is equal to the number of
            // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
            if((!pack_rhs_once) || i2==0)
              pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

            // Everything is packed, we can now call the panel * block kernel:
            gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
          }
        }
      }
    }
  }

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Callable object binding a GEMM call's operands, result, scale factor, and
// blocking; invoked (possibly per row-slice) by the parallelization layer.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    // cols==-1 means "all remaining columns".
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class exposing the blocking sizes and packing buffers; note that when
// StorageOrder is RowMajor the derived classes swap the lhs/rhs scalar types.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fully static sizes: the packing buffers live inside the object itself
// (no heap allocation), over-aligned or manually aligned as needed.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      // Manually round the raw char buffers up to the default alignment.
      this->m_blockA = reinterpret_cast<LhsScalar*>((std::size_t(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((std::size_t(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic sizes: blocking sizes are computed at runtime and the packing
// buffers are lazily heap-allocated and freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index m = this->m_mc;
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // Small products take the lazy coefficient-based path; otherwise zero
    // the destination and accumulate via the blocked GEMM below.
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    // Empty products leave dst untouched.
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    // Fold any scalar factors attached to the operand expressions into alpha.
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
icd3d.c
#include <math.h> #include <stdio.h> #include <time.h> #include <omp.h> #include "icd3d.h" #include "allocate.h" void ICDStep3DCone(struct Sino *sino, struct Image *img, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct ReconAux *reconAux) { /** * Updates one voxel. Voxel change is stored in icdInfo->Delta_xj. */ /** * Compute forward model term of theta1 and theta2: * * theta1_f = -e^t W A_{*,j} * theta2_f = A_{*,j}^t W A _{*,j} */ computeTheta1Theta2ForwardTerm(sino, A, icdInfo, reconParams); /** * Compute prior model term of theta1 and theta2: * */ if(reconParams->prox_mode) computeTheta1Theta2PriorTermProxMap(icdInfo, reconParams); else computeTheta1Theta2PriorTermQGGMRF(icdInfo, reconParams); computeDeltaXjAndUpdate(icdInfo, reconParams, img, reconAux); updateErrorSinogram(sino, A, icdInfo); } void prepareICDInfo(long int j_x, long int j_y, long int j_z, struct ICDInfo3DCone *icdInfo, struct Image *img, struct ReconAux *reconAux, struct ReconParams *reconParams) { icdInfo->old_xj = img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)]; if(reconParams->prox_mode) icdInfo->proxMapInput_j = img->proxMapInput[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)]; icdInfo->j_x = j_x; icdInfo->j_y = j_y; icdInfo->j_z = j_z; extractNeighbors(icdInfo, img, reconParams); icdInfo->theta1_f = 0; icdInfo->theta2_f = 0; icdInfo->theta1_p_QGGMRF = 0; icdInfo->theta2_p_QGGMRF = 0; icdInfo->theta1_p_proxMap = 0; icdInfo->theta2_p_proxMap = 0; } void extractNeighbors(struct ICDInfo3DCone *icdInfo, struct Image *img, struct ReconParams *reconParams) { long int j_x, j_y, j_z; long int N_x, N_y, N_z; long int PLx, MIx; long int PLy, MIy; long int PLz, MIz; j_x = icdInfo->j_x; j_y = icdInfo->j_y; j_z = icdInfo->j_z; N_x = img->params.N_x; N_y = img->params.N_y; N_z = img->params.N_z; /** * Use reflective boundary conditions to find the indices of the neighbors */ PLx = (j_x == N_x-1) ? N_x-2 : j_x+1; PLy = (j_y == N_y-1) ? 
N_y-2 : j_y+1;
    PLz = (j_z == N_z-1) ? N_z-2 : j_z+1;
    /* "MI*" = minus-one (previous) neighbor index; voxels on the low boundary
       reflect inward (index 0 maps to 1), mirroring the "PL*" reflection above. */
    MIx = (j_x == 0) ? 1 : j_x-1;
    MIy = (j_y == 0) ? 1 : j_y-1;
    MIz = (j_z == 0) ? 1 : j_z-1;

    /**
     * Compute the neighbor pixel values.
     *
     * Note that all the pixels of the first half of the arrays
     * have a corresponding pixel in the second half of the array
     * that is on the spatially opposite side.
     * Example: neighborsFace[0] opposite of neighborsFace[3]
     */
    if (reconParams->bFace>=0)
    {
        /* Face Neighbors (primal) -- the 6 voxels sharing a face with (j_x,j_y,j_z).
           Old 3-D indexing kept for reference; vox is now flat, addressed via index_3D. */
        //icdInfo->neighborsFace[0] = img->vox[PLx][j_y][j_z];
        //icdInfo->neighborsFace[1] = img->vox[j_x][PLy][j_z];
        //icdInfo->neighborsFace[2] = img->vox[j_x][j_y][PLz];
        icdInfo->neighborsFace[0] = img->vox[index_3D(PLx,j_y,j_z,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsFace[1] = img->vox[index_3D(j_x,PLy,j_z,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsFace[2] = img->vox[index_3D(j_x,j_y,PLz,img->params.N_y,img->params.N_z)];

        /* Face Neighbors (opposite) */
        //icdInfo->neighborsFace[3] = img->vox[MIx][j_y][j_z];
        //icdInfo->neighborsFace[4] = img->vox[j_x][MIy][j_z];
        //icdInfo->neighborsFace[5] = img->vox[j_x][j_y][MIz];
        icdInfo->neighborsFace[3] = img->vox[index_3D(MIx,j_y,j_z,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsFace[4] = img->vox[index_3D(j_x,MIy,j_z,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsFace[5] = img->vox[index_3D(j_x,j_y,MIz,img->params.N_y,img->params.N_z)];
    }

    if (reconParams->bEdge>=0)
    {
        /* Edge Neighbors (primal) -- the 12 voxels sharing an edge with the center voxel. */
        //icdInfo->neighborsEdge[ 0] = img->vox[j_x][PLy][PLz];
        //icdInfo->neighborsEdge[ 1] = img->vox[j_x][PLy][MIz];
        //icdInfo->neighborsEdge[ 2] = img->vox[PLx][j_y][PLz];
        //icdInfo->neighborsEdge[ 3] = img->vox[PLx][j_y][MIz];
        //icdInfo->neighborsEdge[ 4] = img->vox[PLx][PLy][j_z];
        //icdInfo->neighborsEdge[ 5] = img->vox[PLx][MIy][j_z];
        icdInfo->neighborsEdge[0] = img->vox[index_3D(j_x,PLy,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[1] = img->vox[index_3D(j_x,PLy,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[2] = img->vox[index_3D(PLx,j_y,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[3] = img->vox[index_3D(PLx,j_y,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[4] = img->vox[index_3D(PLx,PLy,j_z,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[5] = img->vox[index_3D(PLx,MIy,j_z,img->params.N_y,img->params.N_z)];

        /* Edge Neighbors (opposite) */
        //icdInfo->neighborsEdge[ 6] = img->vox[j_x][MIy][MIz];
        //icdInfo->neighborsEdge[ 7] = img->vox[j_x][MIy][PLz];
        //icdInfo->neighborsEdge[ 8] = img->vox[MIx][j_y][MIz];
        //icdInfo->neighborsEdge[ 9] = img->vox[MIx][j_y][PLz];
        //icdInfo->neighborsEdge[10] = img->vox[MIx][MIy][j_z];
        //icdInfo->neighborsEdge[11] = img->vox[MIx][PLy][j_z];
        icdInfo->neighborsEdge[6] = img->vox[index_3D(j_x,MIy,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[7] = img->vox[index_3D(j_x,MIy,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[8] = img->vox[index_3D(MIx,j_y,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[9] = img->vox[index_3D(MIx,j_y,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[10] = img->vox[index_3D(MIx,MIy,j_z,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsEdge[11] = img->vox[index_3D(MIx,PLy,j_z,img->params.N_y,img->params.N_z)];
    }

    if (reconParams->bVertex>=0)
    {
        /* Vertex Neighbors (primal) -- the 8 voxels sharing only a corner with the center voxel. */
        //icdInfo->neighborsVertex[0] = img->vox[PLx][PLy][PLz];
        //icdInfo->neighborsVertex[1] = img->vox[PLx][PLy][MIz];
        //icdInfo->neighborsVertex[2] = img->vox[PLx][MIy][PLz];
        //icdInfo->neighborsVertex[3] = img->vox[PLx][MIy][MIz];
        icdInfo->neighborsVertex[0] = img->vox[index_3D(PLx,PLy,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsVertex[1] = img->vox[index_3D(PLx,PLy,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsVertex[2] = img->vox[index_3D(PLx,MIy,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsVertex[3] = img->vox[index_3D(PLx,MIy,MIz,img->params.N_y,img->params.N_z)];

        /* Vertex Neighbors (opposite) */
        //icdInfo->neighborsVertex[4] = img->vox[MIx][MIy][MIz];
        //icdInfo->neighborsVertex[5] = img->vox[MIx][MIy][PLz];
        //icdInfo->neighborsVertex[6] = img->vox[MIx][PLy][MIz];
        //icdInfo->neighborsVertex[7] = img->vox[MIx][PLy][PLz];
        icdInfo->neighborsVertex[4] = img->vox[index_3D(MIx,MIy,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsVertex[5] = img->vox[index_3D(MIx,MIy,PLz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsVertex[6] = img->vox[index_3D(MIx,PLy,MIz,img->params.N_y,img->params.N_z)];
        icdInfo->neighborsVertex[7] = img->vox[index_3D(MIx,PLy,PLz,img->params.N_y,img->params.N_z)];
    }
}

/**
 * Accumulate the forward-model contribution to the ICD surrogate coefficients
 * for the single voxel identified by icdInfo->(j_x,j_y,j_z):
 *
 *      theta1_f = -e^t W A_{*,j}
 *      theta2_f =  A_{*,j}^t W A_{*,j}
 *
 * A_{*,j} is formed separably as B_ij (view-row footprint) * C_ij (column
 * footprint), both stored quantized with scalers B_ij_scaler / C_ij_scaler.
 * Results are accumulated into icdInfo->theta1_f / theta2_f (assumed
 * pre-zeroed by the caller -- TODO confirm in prepareICDInfo).
 */
void computeTheta1Theta2ForwardTerm(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
    /**
     * Compute forward model term of theta1 and theta2:
     *
     * theta1_f = -e^t W A_{*,j}
     * theta2_f = A_{*,j}^t W A _{*,j}
     */
    long int i_beta, i_v, i_w;
    long int j_x, j_y, j_z, j_u;
    float B_ij, A_ij;

    j_x = icdInfo->j_x;
    j_y = icdInfo->j_y;
    j_z = icdInfo->j_z;

    for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
    {
        j_u = A->j_u[j_x][j_y][i_beta];
        /* Only the non-zero detector-column band [i_vstart, i_vstart+i_vstride) intersects this voxel. */
        for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
        {
            B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
            /* Non-zero detector-row band for this (j_u, j_z). */
            for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
            {
                A_ij = B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]];
                icdInfo->theta1_f -= sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * A_ij;
                icdInfo->theta2_f += A_ij * sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * A_ij;
            }
        }
    }

    /* Weights W are stored unscaled; divide by the global scaler once at the end. */
    if(strcmp(reconParams->weightScaler_domain,"spatiallyInvariant") == 0)
    {
        icdInfo->theta1_f /= sino->params.weightScaler_value;
        icdInfo->theta2_f /= sino->params.weightScaler_value;
    }
    else
    {
fprintf(stderr, "ERROR in computeTheta1Theta2ForwardTerm: can't recongnize weightScaler_domain.\n");
        exit(-1);
    }
}

/**
 * Compute the QGGMRF prior-model contribution to theta1/theta2 for one voxel:
 *
 *  theta1_p_QGGMRF = sum          2 b_{j,r} * surrCoeff(x_j - x_r) * (x_j - x_r)
 *                    {r E dj}
 *
 *  theta2_p_QGGMRF = sum          2 b_{j,r} * surrCoeff(x_j - x_r)
 *                    {r E dj}
 *
 * Uses all 26 neighbors (6 face, 12 edge, 8 vertex) previously stored in
 * icdInfo by extractNeighbors(); each class is gated by its weight b* >= 0.
 */
void computeTheta1Theta2PriorTermQGGMRF(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
    int i;
    float delta, surrogateCoeff;
    float sum1Face = 0;
    float sum1Edge = 0;
    float sum1Vertex = 0;
    float sum2Face = 0;
    float sum2Edge = 0;
    float sum2Vertex = 0;

    if (reconParams->bFace>=0)
    {
        for (i = 0; i < 6; ++i)
        {
            delta = icdInfo->old_xj - icdInfo->neighborsFace[i];
            surrogateCoeff = surrogateCoeffQGGMRF(delta, reconParams);
            sum1Face += surrogateCoeff * delta;
            sum2Face += surrogateCoeff;
        }
    }

    if (reconParams->bEdge>=0)
    {
        for (i = 0; i < 12; ++i)
        {
            delta = icdInfo->old_xj - icdInfo->neighborsEdge[i];
            surrogateCoeff = surrogateCoeffQGGMRF(delta, reconParams);
            sum1Edge += surrogateCoeff * delta;
            sum2Edge += surrogateCoeff;
        }
    }

    if (reconParams->bVertex>=0)
    {
        for (i = 0; i < 8; ++i)
        {
            delta = icdInfo->old_xj - icdInfo->neighborsVertex[i];
            surrogateCoeff = surrogateCoeffQGGMRF(delta, reconParams);
            sum1Vertex += surrogateCoeff * delta;
            sum2Vertex += surrogateCoeff;
        }
    }

    icdInfo->theta1_p_QGGMRF = 2 * reconParams->bFace * sum1Face + 2 * reconParams->bEdge * sum1Edge + 2 * reconParams->bVertex * sum1Vertex;
    icdInfo->theta2_p_QGGMRF = 2 * reconParams->bFace * sum2Face + 2 * reconParams->bEdge * sum2Edge + 2 * reconParams->bVertex * sum2Vertex;
}

/**
 * Proximal-map prior contribution to theta1/theta2 for one voxel:
 *
 *  theta1_p_proxMap = (x_j - ~x_j) / (sigma_lambda^2)
 *  theta2_p_proxMap = 1 / (sigma_lambda^2)
 */
void computeTheta1Theta2PriorTermProxMap(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
    icdInfo->theta1_p_proxMap = (icdInfo->old_xj - icdInfo->proxMapInput_j) / (reconParams->sigma_lambda * reconParams->sigma_lambda);
    icdInfo->theta2_p_proxMap = 1.0 / (reconParams->sigma_lambda * reconParams->sigma_lambda);
}

/**
 * Surrogate-function coefficient of the QGGMRF potential:
 *
 *                      /  rho'(Delta) / (2 Delta)   if Delta != 0
 *  surrCoeff(Delta) = {
 *                      \  rho''(0) / 2              if Delta = 0
 *
 * The |Delta| < 1e-5 guard stands in for Delta == 0 to avoid the 0/0 limit.
 */
float surrogateCoeffQGGMRF(float Delta, struct ReconParams *reconParams)
{
    float p, q, T, sigmaX, qmp;
    float num, denom, temp;

    p = reconParams->p;
    q = reconParams->q;
    T = reconParams->T;
    sigmaX = reconParams->sigmaX;
    qmp = q - p;

    if(fabs(Delta) < 1e-5)
    {
        /**
         *  rho''(0)          1
         *  -------- = -----------------
         *     2       p sigmaX^q T^(q-p)
         */
        return 1.0 / ( p * pow(sigmaX, q) * pow(T, qmp) );
    }
    else /* Delta != 0 */
    {
        /**
         *  rho'(Delta)     |Delta|^(p-2)   # (q/p + #)
         *  ----------- =   ------------- * -----------
         *    2 Delta        2 sigmaX^p     (1 + #)^2
         *
         *  where        | Delta  |^(q-p)
         *          # =  |--------|
         *               |T sigmaX|
         */
        temp = pow(fabs(Delta / (T*sigmaX)), qmp); /* this is the # from above */
        num = pow(fabs(Delta), p-2) * temp * (q/p + temp);
        denom = 2 * pow(sigmaX,p) * (1.0 + temp) * (1.0 + temp);
        return num / denom;
    }
}

/**
 * Update the error sinogram after a voxel update:
 *
 *  e <- e - A_{*,j} * Delta_xj
 *
 * Walks the same sparse (B * C) footprint as computeTheta1Theta2ForwardTerm.
 */
void updateErrorSinogram(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo)
{
    long int i_beta, i_v, i_w;
    long int j_x, j_y, j_z, j_u;
    float B_ij;

    j_x = icdInfo->j_x;
    j_y = icdInfo->j_y;
    j_z = icdInfo->j_z;

    for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
    {
        j_u = A->j_u[j_x][j_y][i_beta];
        for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
        {
            B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
            for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
            {
                sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] -= B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]] * icdInfo->Delta_xj;
            }
        }
    }
}

/* Accumulate per-iteration convergence statistics for a single updated voxel. */
void updateIterationStats(struct ReconAux *reconAux, struct ICDInfo3DCone *icdInfo, struct Image *img)
{
    reconAux->TotalValueChange += fabs(icdInfo->Delta_xj);
    //reconAux->TotalVoxelValue += _MAX_(img->vox[icdInfo->j_x][icdInfo->j_y][icdInfo->j_z], icdInfo->old_xj);
    reconAux->TotalVoxelValue += _MAX_(img->vox[index_3D(icdInfo->j_x,icdInfo->j_y,icdInfo->j_z,img->params.N_y,img->params.N_z)], icdInfo->old_xj);
    reconAux->NumUpdatedVoxels++;
}

/* Zero the running statistics at the start of an iteration. */
void resetIterationStats(struct ReconAux *reconAux)
{
    reconAux->TotalValueChange = 0;
    reconAux->TotalVoxelValue = 0;
    reconAux->NumUpdatedVoxels = 0;
}

/* Re-randomize the voxel visiting order over the full N_x*N_y*N_z index set. */
void RandomAux_ShuffleOrderXYZ(struct RandomAux *aux, struct ImageParams *params)
{
    fprintf(stdout, "zipline mode 0\n");
    shuffleLongIntArray(aux->orderXYZ, params->N_x * params->N_y * params->N_z);
}

/**
 * Decompose a flat voxel index into (j_x, j_y, j_z).
 * Inverse of the packing  j_xyz = j_z + N_z j_y + N_z N_y j_x.
 * N_x is unused but kept for a symmetric signature.
 */
void indexExtraction3D(long int j_xyz, long int *j_x, long int N_x, long int *j_y, long int N_y, long int *j_z, long int N_z)
{
    /* j_xyz = j_z + N_z j_y + N_z N_y j_x */
    long int j_temp;

    j_temp = j_xyz;
    /* Now, j_temp = j_z + N_z j_y + N_z N_y j_x */

    *j_z = j_temp % N_z;
    j_temp = (j_temp-*j_z) / N_z;
    /* Now, j_temp = j_y + N_y j_x */

    *j_y = j_temp % N_y;
    j_temp = (j_temp-*j_y) / N_y;
    /* Now, j_temp = j_x */

    *j_x = j_temp;

    return;
}

/**
 * Compute the full MAP cost: forward (data-fit) term plus either the
 * proximal-map prior (prox mode) or the QGGMRF prior.
 */
float MAPCost3D(struct Sino *sino, struct Image *img, struct ReconParams *reconParams)
{
    float cost;

    // Initialize cost with forward model cost
    cost = MAPCostForward(sino);

    // if proximal map mode, add proximal map cost
    if(reconParams->prox_mode)
        cost += MAPCostPrior_ProxMap(img, reconParams);
    // if qGGMRF mode, add prior cost
    else
        cost += MAPCostPrior_QGGMRF(img, reconParams);

    return cost;
}

/**
 * Forward-model cost:  ForwardCost = 1/2 ||e||^{2}_{W}
 * (weighted squared norm of the error sinogram, scaled by weightScaler_value).
 */
float MAPCostForward(struct Sino *sino)
{
    long int i_beta, i_v, i_w;
    float cost;

    cost = 0;
    for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
    {
        for (i_v = 0; i_v < sino->params.N_dv; ++i_v)
        {
            for (i_w = 0; i_w < sino->params.N_dw; ++i_w)
            {
                cost += sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)];
            }
        }
    }
    return
cost / (2.0 * sino->params.weightScaler_value);
}

/**
 * QGGMRF prior cost over the whole volume:
 *
 *  cost = sum      b_{s,r} rho(x_s - x_r)
 *         {s,r} E P
 *
 * Each unordered pair is counted once by summing only the "first half"
 * neighborhood at every voxel (see the SingleVoxel_HalfNeighborhood helper).
 */
float MAPCostPrior_QGGMRF(struct Image *img, struct ReconParams *reconParams)
{
    long int j_x, j_y, j_z;
    struct ICDInfo3DCone icdInfo;
    float cost;
    float temp;

    cost = 0;
    for (j_x = 0; j_x < img->params.N_x; ++j_x)
    {
        for (j_y = 0; j_y < img->params.N_y; ++j_y)
            for (j_z = 0; j_z < img->params.N_z; ++j_z)
            {
                /**
                 * Prepare icdInfo
                 */
                icdInfo.j_x = j_x;
                icdInfo.j_y = j_y;
                icdInfo.j_z = j_z;
                extractNeighbors(&icdInfo, img, reconParams);
                icdInfo.old_xj = img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)];
                temp = MAPCostPrior_QGGMRFSingleVoxel_HalfNeighborhood(&icdInfo, reconParams);
                cost += temp;
            }
    }
    return cost;
}

/**
 * Compute proximal mapping prior cost
 *
 *             1          ||      ||2
 *  cost += ---------------- || x - x~ ||
 *          2 sigma_lambda^2 ||      ||2
 *
 * Only voxels inside the circular reconstruction mask contribute
 * (isInsideMask returns 0/1).
 */
float MAPCostPrior_ProxMap(struct Image *img, struct ReconParams *reconParams)
{
    long int j_x, j_y, j_z;
    float cost, diff_voxel;

    cost = 0;
    for (j_x = 0; j_x < img->params.N_x; ++j_x)
    {
        for (j_y = 0; j_y < img->params.N_y; ++j_y)
        {
            for (j_z = 0; j_z < img->params.N_z; ++j_z)
            {
                //diff_voxel = img->vox[j_x][j_y][j_z] - img->proxMapInput[j_x][j_y][j_z];
                diff_voxel = img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)] - img->proxMapInput[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)];
                cost += diff_voxel*diff_voxel*isInsideMask(j_x, j_y, img->params.N_x, img->params.N_y);
            }
        }
    }
    cost /= 2 * reconParams->sigma_lambda * reconParams->sigma_lambda;
    return cost;
}

/**
 * Per-voxel QGGMRF prior cost over half the neighborhood:
 *
 *  cost += sum              b_{j,r} * rho(x_j - x_r)
 *          {r E dj^half}
 *
 * Using only the first half (3 face, 6 edge, 4 vertex neighbors) avoids
 * double-counting pairs when summed over all voxels.
 */
float MAPCostPrior_QGGMRFSingleVoxel_HalfNeighborhood(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams)
{
    int i;
    float sum1Face, sum1Edge, sum1Vertex;

    sum1Face = 0;
    sum1Edge = 0;
    sum1Vertex = 0;

    if (reconParams->bFace>=0)
        for (i = 0; i < 3; ++i) /* Note: only use first half of the neighbors */
            sum1Face += QGGMRFPotential(icdInfo->old_xj - icdInfo->neighborsFace[i], reconParams);

    if (reconParams->bEdge>=0)
        for (i = 0; i < 6; ++i) /* Note: only use first half of the neighbors */
            sum1Edge += QGGMRFPotential(icdInfo->old_xj - icdInfo->neighborsEdge[i], reconParams);

    if (reconParams->bVertex>=0)
        for (i = 0; i < 4; ++i) /* Note: only use first half of the neighbors */
            sum1Vertex += QGGMRFPotential(icdInfo->old_xj - icdInfo->neighborsVertex[i], reconParams);

    return reconParams->bFace * sum1Face + reconParams->bEdge * sum1Edge + reconParams->bVertex * sum1Vertex;
}

/* the potential function of the QGGMRF prior model. p << q <= 2 */
float QGGMRFPotential(float delta, struct ReconParams *reconParams)
{
    float p, q, T, sigmaX;
    float temp, GGMRF_Pot;

    p = reconParams->p;
    q = reconParams->q;
    T = reconParams->T;
    sigmaX = reconParams->sigmaX;

    GGMRF_Pot = pow(fabs(delta),p)/(p*pow(sigmaX,p));
    temp = pow(fabs(delta/(T*sigmaX)), q-p);
    return ( GGMRF_Pot * temp/(1.0+temp) );
}

/* Map a zipline index to its inclusive [j_z_start, j_z_stop] slab, clipped to the volume. */
void partialZipline_computeStartStopIndex(long int *j_z_start, long int *j_z_stop, long int indexZiplines, long int numVoxelsPerZipline, long int N_z)
{
    *j_z_start = indexZiplines*numVoxelsPerZipline;
    *j_z_stop = _MIN_(*j_z_start+numVoxelsPerZipline-1, N_z-1);
}

/* Inverse of the above: which zipline does slice j_z belong to. */
int partialZipline_computeZiplineIndex(long int j_z, long int numVoxelsPerZipline)
{
    return floor(j_z / numVoxelsPerZipline);
}

/**
 * Collect into icdInfo[0..N_M-1] the voxels of column (j_x,j_y) that belong to
 * the current random group (k_G) and lie on a "hot" partial zipline (or on any
 * zipline when partial NHICD updates are inactive). Stores the member count in
 * randomZiplineAux->N_M.
 */
void prepareICDInfoRandGroup(long int j_x, long int j_y, struct RandomZiplineAux *randomZiplineAux, struct ICDInfo3DCone *icdInfo, struct Image *img, struct ReconParams *reconParams, struct ReconAux *reconAux)
{
    /* j = j_y + N_y j_x */
    long int j_z, k_M;
    long int j_z_start, j_z_stop;
    long int indexZiplines;

    k_M = 0;
    for (indexZiplines = 0; indexZiplines < reconParams->numZiplines; ++indexZiplines)
    {
        if (!reconAux->NHICD_isPartialUpdateActive || reconAux->NHICD_isPartialZiplineHot[indexZiplines])
        {
            partialZipline_computeStartStopIndex(&j_z_start, &j_z_stop, indexZiplines, reconParams->numVoxelsPerZipline, img->params.N_z);
            for (j_z = j_z_start; j_z <= j_z_stop; ++j_z)
            {
                if(randomZiplineAux->groupIndex[j_x][j_y][j_z] == randomZiplineAux->k_G)
                {
                    prepareICDInfo(j_x, j_y, j_z, &icdInfo[k_M], img, reconAux, reconParams);
                    /* Increment k_M. After loop terminates k_M = No. members */
                    k_M++;
                }
            }
        }
    }
    randomZiplineAux->N_M = k_M;
}

/**
 * Compute voxel increment Delta_xj and apply it.
 * Delta_xj >= -x_j accomplishes positivity constraint:
 *
 *  Delta_xj = clip{ -theta1/theta2, [-x_j, inf) }
 *
 * theta1/theta2 combine the forward term with either the proximal-map or the
 * QGGMRF prior term. The self-inequality test (x != x) detects NaN and dumps
 * diagnostics without aborting.
 */
void computeDeltaXjAndUpdate(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct Image *img, struct ReconAux *reconAux)
{
    float theta1, theta2;

    if(reconParams->prox_mode)
    {
        theta1 = icdInfo->theta1_f + icdInfo->theta1_p_proxMap;
        theta2 = icdInfo->theta2_f + icdInfo->theta2_p_proxMap;
    }
    else
    {
        theta1 = icdInfo->theta1_f + icdInfo->theta1_p_QGGMRF;
        theta2 = icdInfo->theta2_f + icdInfo->theta2_p_QGGMRF;
    }

    if (theta2 != 0)
    {
        icdInfo->Delta_xj = -theta1/theta2;
        if(reconParams->is_positivity_constraint)
            icdInfo->Delta_xj = _MAX_(icdInfo->Delta_xj, -icdInfo->old_xj);
    }
    else
    {
        /* Degenerate curvature: fall back (note: assigns a value, not an increment of 0 -- by design? TODO confirm) */
        icdInfo->Delta_xj = _MAX_(icdInfo->old_xj, 0);
    }

    /* NaN check: NaN is the only value for which x != x. */
    if(icdInfo->Delta_xj != icdInfo->Delta_xj)
    {
        printf("theta1_f = %e\n", icdInfo->theta1_f);
        printf("theta2_f = %e\n", icdInfo->theta2_f);
        printf("theta1_p_QGGMRF = %e\n", icdInfo->theta1_p_QGGMRF);
        printf("theta2_p_QGGMRF = %e\n", icdInfo->theta2_p_QGGMRF);
        printf("theta1_p_proxMap = %e\n", icdInfo->theta1_p_proxMap);
        printf("theta2_p_proxMap = %e\n", icdInfo->theta2_p_proxMap);
        printf("theta2 = %e\n", theta2);
        printf("theta2 = %e\n", theta2);
        printf("-t1/t2 = %e\n", -theta1/theta2);
        printf("Delta_xj = %e\n", icdInfo->Delta_xj);
        printf("------------------------\n");
    }

    /**
     * Update voxel:
     *
     * x_j <- x_j + Delta_xj
     */
    //img->vox[icdInfo->j_x][icdInfo->j_y][icdInfo->j_z] += icdInfo->Delta_xj;
    img->vox[index_3D(icdInfo->j_x,icdInfo->j_y,icdInfo->j_z,img->params.N_y,img->params.N_z)] += icdInfo->Delta_xj;
}

/* Apply computeDeltaXjAndUpdate to every member voxel of the current group. */
void computeDeltaXjAndUpdateGroup(struct ICDInfo3DCone *icdInfo, struct RandomZiplineAux
*randomZiplineAux, struct ReconParams *reconParams, struct Image *img, struct ReconAux *reconAux)
{
    long int N_M, k_M;
    struct ICDInfo3DCone *info;

    N_M = randomZiplineAux->N_M;
    for (k_M = 0; k_M < N_M; ++k_M)
    {
        info = &icdInfo[k_M];
        computeDeltaXjAndUpdate(info, reconParams, img, reconAux);
    }
}

/**
 * Accumulate per-iteration and per-zipline NHICD statistics for all member
 * voxels of the current group (all members share the same (j_x, j_y) column).
 */
void updateIterationStatsGroup(struct ReconAux *reconAux, struct ICDInfo3DCone *icdInfoArray, struct RandomZiplineAux *randomZiplineAux, struct Image *img, struct ReconParams *reconParams)
{
    long int N_M, k_M;
    float absDelta, totValue;
    struct ICDInfo3DCone *icdInfo;
    long int j_x, j_y, j_z;
    long int indexZiplines;

    j_x = icdInfoArray[0].j_x;
    j_y = icdInfoArray[0].j_y;
    N_M = randomZiplineAux->N_M;
    for (k_M = 0; k_M < N_M; ++k_M)
    {
        icdInfo = &icdInfoArray[k_M];
        j_z = icdInfo->j_z;
        indexZiplines = partialZipline_computeZiplineIndex(j_z, reconParams->numVoxelsPerZipline);

        absDelta = fabs(icdInfo->Delta_xj);
        //totValue = _MAX_(img->vox[j_x][j_y][j_z], icdInfo->old_xj);
        totValue = _MAX_(img->vox[index_3D(j_x,j_y,j_z,img->params.N_y,img->params.N_z)], icdInfo->old_xj);

        reconAux->TotalValueChange += absDelta;
        reconAux->TotalVoxelValue += totValue;
        reconAux->NumUpdatedVoxels++;

        reconAux->NHICD_numUpdatedVoxels[indexZiplines]++;
        reconAux->NHICD_totalValueChange[indexZiplines] += absDelta;
    }
}

/* Pretty-print one iteration's convergence metrics to stdout. */
void disp_iterationInfo(struct ReconAux *reconAux, struct ReconParams *reconParams, int itNumber, int MaxIterations, float cost, float relUpdate, float stopThresholdChange, float weightScaler_value, float voxelsPerSecond, float ticToc_iteration, float weightedNormSquared_e, float ratioUpdated, float totalEquits)
{
    printf("************************** Iteration %-2d (max. %d) **************************\n", itNumber, MaxIterations);
    printf("* Cost = %-10.10e\n", cost);
    printf("* Rel. Update = %-10.10e %% (threshold = %-10.10e %%)\n", relUpdate*100, stopThresholdChange*100);
    printf("* RWFE = ||e||_W/||y||_W = %-10.10e %% (threshold = %-10.10e %%)\n", reconAux->relativeWeightedForwardError*100, reconParams->stopThesholdRWFE_pct);
    printf("* RUFE = ||e|| / ||y|| = %-10.10e %% (threshold = %-10.10e %%)\n", reconAux->relativeUnweightedForwardError*100, reconParams->stopThesholdRUFE_pct);
    printf("* ----------------------------------------------------------------------------\n");
    printf("* 1/M ||e||^2_W = %-10.10e = 1/%-10.10f\n", weightedNormSquared_e, 1/weightedNormSquared_e);
    printf("* weightScaler_value = %-10.10e = 1/%-10.10f\n", weightScaler_value, 1/weightScaler_value);
    printf("* ----------------------------------------------------------------------------\n");
    printf("* voxelsPerSecond = %-10.10e \n", voxelsPerSecond);
    printf("* time icd update = %-10.10e s\n", ticToc_iteration);
    printf("* ratioUpdated = %-10.10e %%\n", ratioUpdated*100);
    printf("* totalEquits = %-10.10e \n", totalEquits);
    printf("******************************************************************************\n\n");
}

/**
 * Relative-update convergence metric: average absolute voxel change divided by
 * a scale chosen per reconParams->relativeChangeMode:
 *   'meanImage'   -- average value of the updated voxels,
 *   'fixedScaler' -- user-supplied constant,
 *   'percentile'  -- image percentile (subsampled for speed).
 * Returns 0 when nothing was updated or the image is non-positive on average.
 */
float computeRelUpdate(struct ReconAux *reconAux, struct ReconParams *reconParams, struct Image *img)
{
    float relUpdate;
    float AvgValueChange, AvgVoxelValue;
    float scaler;
    int subsampleFactor = 10; /* when chosen 1 this is completely accurate. User can mess with this to some extent */

    if(reconAux->NumUpdatedVoxels>0)
    {
        AvgValueChange = reconAux->TotalValueChange / reconAux->NumUpdatedVoxels;
        AvgVoxelValue = reconAux->TotalVoxelValue / reconAux->NumUpdatedVoxels;
    }
    else
    {
        AvgValueChange = 0;
        AvgVoxelValue = 0;
    }

    if(AvgVoxelValue>0)
    {
        /* [relativeChangeMode] 'meanImage' or 'fixedScaler' or 'percentile' */
        if (strcmp(reconParams->relativeChangeMode, "meanImage")==0)
        {
            relUpdate = AvgValueChange / AvgVoxelValue;
        }
        else if (strcmp(reconParams->relativeChangeMode, "fixedScaler")==0)
        {
            relUpdate = AvgValueChange / reconParams->relativeChangeScaler;
        }
        else if (strcmp(reconParams->relativeChangeMode, "percentile")==0)
        {
            //scaler = prctile_copyFast(&img->vox[0][0][0], img->params.N_x*img->params.N_y*img->params.N_z, reconParams->relativeChangePercentile, subsampleFactor);
            scaler = prctile_copyFast(&img->vox[0], img->params.N_x*img->params.N_y*img->params.N_z, reconParams->relativeChangePercentile, subsampleFactor);
            relUpdate = AvgValueChange / scaler;
        }
        else
        {
            printf("Error: relativeChangeMode unknown\n");
            exit(-1);
        }
    }
    else
    {
        relUpdate = 0;
    }

    return relUpdate;
}

/* * * * * * * * * * * * parallel * * * * * * * * * * * * **/

/**
 * Allocate per-thread scratch space (partial theta accumulators plus one slot
 * per thread for loop temporaries). The empty parallel region exists only to
 * query the actual OpenMP thread count.
 */
void prepareParallelAux(struct ParallelAux *parallelAux, long int N_M_max)
{
    int numThreads;

    #pragma omp parallel
    {
        #pragma omp master
        {
            parallelAux->numThreads = numThreads = omp_get_num_threads();
        }
    }
    parallelAux->N_M_max = N_M_max;
    parallelAux->partialTheta = (struct PartialTheta**) multialloc(sizeof(struct PartialTheta), 2, numThreads, N_M_max);
    parallelAux->j_u = mget_spc(numThreads, sizeof(long int));
    parallelAux->i_v = mget_spc(numThreads, sizeof(long int));
    parallelAux->B_ij = mget_spc(numThreads, sizeof(float));
    parallelAux->k_M = mget_spc(numThreads, sizeof(long int));
    parallelAux->j_z = mget_spc(numThreads, sizeof(long int));
    parallelAux->i_w = mget_spc(numThreads, sizeof(long int));
    parallelAux->A_ij = mget_spc(numThreads, sizeof(float));
}

/* Free everything allocated in prepareParallelAux. */
void freeParallelAux(struct ParallelAux
*parallelAux)
{
    multifree((void**)parallelAux->partialTheta, 2);
    free((void*)parallelAux->j_u);
    free((void*)parallelAux->i_v);
    free((void*)parallelAux->B_ij);
    free((void*)parallelAux->k_M);
    free((void*)parallelAux->j_z);
    free((void*)parallelAux->i_w);
    free((void*)parallelAux->A_ij);
}

/**
 * One full ICD step for a group of voxels sharing a (j_x, j_y) column:
 * forward theta terms, prior theta terms (prox or QGGMRF), voxel updates,
 * then error-sinogram correction. No-op when the group is empty.
 */
void ICDStep3DConeGroup(struct Sino *sino, struct Image *img, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct RandomZiplineAux *randomZiplineAux, struct ParallelAux *parallelAux, struct ReconAux *reconAux)
{
    if (randomZiplineAux->N_M>0)
    {
        computeTheta1Theta2ForwardTermGroup(sino, A, icdInfo, randomZiplineAux, parallelAux, reconParams);
        if(reconParams->prox_mode)
            computeTheta1Theta2PriorTermProxMapGroup(icdInfo, reconParams, randomZiplineAux);
        else
            computeTheta1Theta2PriorTermQGGMRFGroup(icdInfo, reconParams, randomZiplineAux);
        computeDeltaXjAndUpdateGroup(icdInfo, randomZiplineAux, reconParams, img, reconAux);
        updateErrorSinogramGroup(sino, A, icdInfo, randomZiplineAux);
    }
}

/**
 * Compute forward model term of theta1 and theta2 for all members:
 *
 *  theta1_f = -e^t W A_{*,j}
 *  theta2_f =  A_{*,j}^t W A_{*,j}
 *
 * Views are split across OpenMP threads; each thread accumulates into its own
 * partialTheta[threadID][k_M] slot, and the slots are reduced serially after
 * the parallel region (avoids atomics on the hot loop).
 */
void computeTheta1Theta2ForwardTermGroup(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct RandomZiplineAux *randomZiplineAux, struct ParallelAux *parallelAux, struct ReconParams *reconParams)
{
    long int i_beta, i_v, i_w;
    long int j_x, j_y, j_z, j_u;
    float B_ij, A_ij;
    long int N_M, k_M;
    int threadID;

    N_M = randomZiplineAux->N_M;
    j_x = (icdInfo[0]).j_x;
    j_y = (icdInfo[0]).j_y;

    /* Zero every thread's partial accumulators. */
    for (threadID = 0; threadID < parallelAux->numThreads; ++threadID)
    {
        for (k_M = 0; k_M < N_M; ++k_M)
        {
            parallelAux->partialTheta[threadID][k_M].t1 = 0;
            parallelAux->partialTheta[threadID][k_M].t2 = 0;
        }
    }

    #pragma omp parallel private(threadID, j_u, i_v, B_ij, k_M, j_z, i_w, A_ij)
    {
        threadID = omp_get_thread_num();
        #pragma omp for
        for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
        {
            j_u = A->j_u[j_x][j_y][i_beta];
            for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
            {
                B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
                /* Loop through all the members along zip line */
                for (k_M = 0; k_M < N_M; ++k_M)
                {
                    j_z = icdInfo[k_M].j_z;
                    for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
                    {
                        A_ij = B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]];
                        parallelAux->partialTheta[threadID][k_M].t1 -= sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * A_ij;
                        parallelAux->partialTheta[threadID][k_M].t2 += A_ij * sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] * A_ij;
                    }
                }
            }
        }
    }

    /* Serial reduction of the per-thread partial sums. */
    for (threadID = 0; threadID < parallelAux->numThreads; ++threadID)
    {
        for (k_M = 0; k_M < N_M; ++k_M)
        {
            icdInfo[k_M].theta1_f += parallelAux->partialTheta[threadID][k_M].t1;
            icdInfo[k_M].theta2_f += parallelAux->partialTheta[threadID][k_M].t2;
        }
    }

    if(strcmp(reconParams->weightScaler_domain,"spatiallyInvariant") == 0)
    {
        for (k_M = 0; k_M < N_M; ++k_M)
        {
            icdInfo[k_M].theta1_f /= sino->params.weightScaler_value;
            icdInfo[k_M].theta2_f /= sino->params.weightScaler_value;
        }
    }
    else
    {
        fprintf(stderr, "ERROR in computeTheta1Theta2ForwardTerm: can't recongnize weightScaler_domain.\n");
        exit(-1);
    }
}

/* QGGMRF prior theta terms for every group member, one member per OpenMP task. */
void computeTheta1Theta2PriorTermQGGMRFGroup(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct RandomZiplineAux *randomZiplineAux)
{
    long int N_M, k_M;

    N_M = randomZiplineAux->N_M;
    #pragma omp parallel for
    for (k_M = 0; k_M < N_M; ++k_M)
    {
        computeTheta1Theta2PriorTermQGGMRF(&icdInfo[k_M], reconParams);
    }
}

/**
 * Update error sinogram for all group members:
 *
 *  e <- e - A_{*,j} * Delta_xj
 *
 * Parallelized over views (i_beta), so distinct threads write distinct
 * sinogram views.
 */
void updateErrorSinogramGroup(struct Sino *sino, struct SysMatrix *A, struct ICDInfo3DCone *icdInfo, struct RandomZiplineAux *randomZiplineAux)
{
    long int N_M, k_M;
    long int i_beta, i_v, i_w;
    long int j_x, j_y, j_z, j_u;
    float B_ij;

    N_M = randomZiplineAux->N_M;
    j_x = icdInfo[0].j_x;
    j_y = icdInfo[0].j_y;

    #pragma omp parallel for private(j_u, i_v, B_ij, k_M, j_z, i_w)
    for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
    {
        j_u = A->j_u[j_x][j_y][i_beta];
        for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta]; ++i_v)
        {
            B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
            for (k_M = 0; k_M < N_M; ++k_M)
            {
                j_z = icdInfo[k_M].j_z;
                for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
                {
                    sino->e[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)] -= B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]] * icdInfo[k_M].Delta_xj;
                }
            }
        }
    }
}

/* Proximal-map prior theta terms for every group member (serial; trivial cost). */
void computeTheta1Theta2PriorTermProxMapGroup(struct ICDInfo3DCone *icdInfo, struct ReconParams *reconParams, struct RandomZiplineAux *randomZiplineAux)
{
    long int N_M, k_M;

    N_M = randomZiplineAux->N_M;
    for (k_M = 0; k_M < N_M; ++k_M)
    {
        icdInfo[k_M].theta1_p_proxMap = (icdInfo[k_M].old_xj - icdInfo[k_M].proxMapInput_j) / (reconParams->sigma_lambda * reconParams->sigma_lambda);
        icdInfo[k_M].theta2_p_proxMap = 1.0 / (reconParams->sigma_lambda * reconParams->sigma_lambda);
    }
}

/* * * * * * * * * * * * time aux ICD * * * * * * * * * * * * **/

/* Restart the voxels/second stopwatch. */
void speedAuxICD_reset(struct SpeedAuxICD *speedAuxICD)
{
    speedAuxICD->numberUpdatedVoxels = 0;
    speedAuxICD->tic = omp_get_wtime();
    speedAuxICD->toc = -1.0;
    speedAuxICD->voxelsPerSecond = -1.0;
}

/* Add to the updated-voxel counter. */
void speedAuxICD_update(struct SpeedAuxICD *speedAuxICD, long int incrementNumber)
{
    speedAuxICD->numberUpdatedVoxels += incrementNumber;
}

/* Compute voxels/second since the last reset (0 if nothing was updated). */
void speedAuxICD_computeSpeed(struct SpeedAuxICD *speedAuxICD)
{
    if (speedAuxICD->numberUpdatedVoxels > 0)
    {
        speedAuxICD->toc = omp_get_wtime();
        speedAuxICD->voxelsPerSecond = ((float)speedAuxICD->numberUpdatedVoxels) / (speedAuxICD->toc -
speedAuxICD->tic);
    }
    else
    {
        speedAuxICD->voxelsPerSecond = 0;
    }
}

/* * * * * * * * * * * * NHICD * * * * * * * * * * * * **/

/**
 * A voxel is "hot" (worth revisiting) if its last recorded change exceeds the
 * threshold, or with random probability NHICD_random/100 to keep exploring.
 */
int NHICD_isVoxelHot(struct ReconParams *reconParams, struct Image *img, long int j_x, long int j_y, long int j_z, float lastChangeThreshold)
{
    if(img->lastChange[j_x][j_y][j_z] > lastChangeThreshold)
        return 1;

    if(bernoulli(reconParams->NHICD_random/100)==1)
        return 1;

    return 0;
}

/**
 * Enable NHICD partial updates once the relative weighted forward error drops
 * below the configured percentage (and NHICD mode is not "off").
 */
int NHICD_activatePartialUpdate(struct ReconParams *reconParams, float relativeWeightedForwardError)
{
    if (relativeWeightedForwardError*100<reconParams->NHICD_ThresholdAllVoxels_ErrorPercent && strcmp(reconParams->NHICD_Mode, "off")!=0)
        return 1;
    else
        return 0;
}

/**
 * Decide whether one partial zipline of column (j_x, j_y) should be updated.
 * Cold ziplines have their countdown timer decremented (clamped at 0) so they
 * eventually become due. Always hot when partial updates are inactive.
 */
int NHICD_checkPartialZiplineHot(struct ReconAux *reconAux, long int j_x, long int j_y, long int indexZiplines, struct Image *img)
{
    if (reconAux->NHICD_isPartialUpdateActive)
    {
        if (img->lastChange[j_x][j_y][indexZiplines]>=reconAux->lastChangeThreshold || img->timeToChange[j_x][j_y][indexZiplines]==0)
        {
            return 1;
        }
        else
        {
            img->timeToChange[j_x][j_y][indexZiplines] = _MAX_(img->timeToChange[j_x][j_y][indexZiplines]-1, 0);
            return 0;
        }
    }
    else
    {
        return 1;
    }
}

/* Evaluate hotness for every zipline of the column and reset its per-zipline stats. */
void NHICD_checkPartialZiplinesHot(struct ReconAux *reconAux, long int j_x, long int j_y, struct ReconParams *reconParams, struct Image *img)
{
    long int indexZiplines;

    for (indexZiplines = 0; indexZiplines < reconParams->numZiplines; ++indexZiplines)
    {
        reconAux->NHICD_isPartialZiplineHot[indexZiplines] = NHICD_checkPartialZiplineHot(reconAux, j_x, j_y, indexZiplines, img);
        reconAux->NHICD_numUpdatedVoxels[indexZiplines] = 0;
        reconAux->NHICD_totalValueChange[indexZiplines] = 0;
    }
}

/**
 * After updating column (j_x, j_y): blend each hot zipline's average change
 * into lastChange for this column (weight w_past/w_self), diffuse a fraction
 * (w_neighbors, shaped by NHICD_neighborFilter) into the 8 in-plane neighbor
 * columns, and re-arm the zipline's revisit timer with a randomized interval
 * whose mean is 100/NHICD_random - 1.
 */
void updateNHICDStats(struct ReconAux *reconAux, long int j_x, long int j_y, struct Image *img, struct ReconParams *reconParams)
{
    long int jj_x, jj_y, jj_x_min, jj_y_min, jj_x_max, jj_y_max;
    float avgChange;
    float mean_timeToChange;
    long int sigma_timeToChange;
    long int indexZiplines;
    float w_self = 1;
    float w_past = 0.5;
    float w_neighbors = 0.5;

    mean_timeToChange = 100.0/reconParams->NHICD_random-1;
    sigma_timeToChange = round(mean_timeToChange*0.5);
    /* Clip the 3x3 in-plane neighborhood to the image bounds. */
    jj_x_min = _MAX_(j_x-1, 0);
    jj_y_min = _MAX_(j_y-1, 0);
    jj_x_max = _MIN_(j_x+1, img->params.N_x-1);
    jj_y_max = _MIN_(j_y+1, img->params.N_y-1);

    for (indexZiplines = 0; indexZiplines < reconParams->numZiplines; ++indexZiplines)
    {
        if (reconAux->NHICD_isPartialZiplineHot[indexZiplines])
        {
            avgChange = reconAux->NHICD_numUpdatedVoxels[indexZiplines] > 0 ? reconAux->NHICD_totalValueChange[indexZiplines]/reconAux->NHICD_numUpdatedVoxels[indexZiplines] : 0;
            img->lastChange[j_x][j_y][indexZiplines] = w_past * img->lastChange[j_x][j_y][indexZiplines] + w_self * avgChange;
            for (jj_x = jj_x_min; jj_x <= jj_x_max; ++jj_x)
            {
                for (jj_y = jj_y_min; jj_y <= jj_y_max; ++jj_y)
                {
                    img->lastChange[jj_x][jj_y][indexZiplines] += w_neighbors * reconAux->NHICD_neighborFilter[1+jj_x-j_x][1+jj_y-j_y] * avgChange;
                }
            }
            img->timeToChange[j_x][j_y][indexZiplines] = almostUniformIntegerRV(mean_timeToChange, sigma_timeToChange);
        }
    }
}
4525.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization.
 * Fills A with the deterministic pattern (i + j) / nj so results are
 * reproducible across runs. */
static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
    // printf("Initializing Array\n");
    int i, j;
    for (i = 0; i < ni; i++)
        for (j = 0; j < nj; j++)
        {
            A[i][j] = ((DATA_TYPE) (i + j) / nj);
        }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
    int i, j;
    for (i = 0; i < ni; i++)
        for (j = 0; j < nj; j++)
        {
            fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
            if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
        }
    fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
   3x3 convolution with a fixed stencil; interior points only (borders of B
   are never written).
   NOTE(review): the pragma combination below (host `parallel for simd` on i
   nesting a `target teams distribute parallel for` on j) looks like an
   autotuner-generated variant (file id 4525) -- performance, and legality of
   the nested target region, depend on the OpenMP runtime; confirm before
   hand-editing. */
static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
    int i, j;
#pragma scop
#pragma omp parallel for simd num_threads(14) private(j)
    for (i = 1; i < _PB_NI - 1; ++i)
    {
#pragma omp target teams distribute parallel for schedule(static, 2) num_threads(2) dist_schedule(static, 8)
        for (j = 1; j < _PB_NJ - 1; ++j)
        {
            B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                    + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                    + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
    // printf("Kernal computation complete !!\n");
}

int main(int argc, char** argv)
{
    /* Retrieve problem size. */
    int ni = NI;
    int nj = NJ;

    /* Variable declaration/allocation. */
    POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
    POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

    /* Initialize array(s). */
    init_array (ni, nj, POLYBENCH_ARRAY(A));

    /* Start timer. */
    //polybench_start_instruments;
    polybench_timer_start();

    /* Run kernel. */
    kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

    /* Stop and print timer. */
    polybench_timer_stop();
    polybench_timer_print();
    //polybench_stop_instruments;
    //polybench_print_instruments;

    /* Prevent dead-code elimination. All live-out data must be printed
       by the function call in argument. */
    polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

    /* Be clean. */
    POLYBENCH_FREE_ARRAY(A);
    POLYBENCH_FREE_ARRAY(B);

    return 0;
}
pooling_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 3x3 max pooling with stride 2, one channel per OpenMP thread.
// Assumes top_blob was sized by the caller so every 3x3 window read from
// bottom_blob stays in bounds (no padding handled here) — TODO confirm
// against the caller's blob-shape computation.
// On NEON targets 4 output pixels are computed per iteration; a scalar
// loop handles the remaining outw % 4 pixels.
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After one output row the read pointers have advanced 2*outw floats;
    // tailstep skips the rest of that input row plus one more full row
    // (vertical stride 2).
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // Three consecutive input rows feed one output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;            // number of 4-output vector groups
            int remain = outw - (nn << 2); // leftover outputs for scalar tail
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            // vld2q deinterleaves: val[0] = even columns, val[1] = odd columns.
            float32x4x2_t _r0 = vld2q_f32(r0);
            float32x4x2_t _r1 = vld2q_f32(r1);
            float32x4x2_t _r2 = vld2q_f32(r2);
            for (; nn>0; nn--)
            {
                // Preload the next 8 floats of each row; their first even lane
                // supplies column 2 of the last window in this group.
                float32x4x2_t _r0n = vld2q_f32(r0+8);
                float32x4x2_t _r1n = vld2q_f32(r1+8);
                float32x4x2_t _r2n = vld2q_f32(r2+8);
                // Per-row max of window columns 0 and 1.
                float32x4_t _max0 = vmaxq_f32(_r0.val[0], _r0.val[1]);
                float32x4_t _max1 = vmaxq_f32(_r1.val[0], _r1.val[1]);
                float32x4_t _max2 = vmaxq_f32(_r2.val[0], _r2.val[1]);
                // Window column 2 = even lanes shifted left by one, borrowing
                // the first even lane of the next block.
                float32x4_t _r02 = vextq_f32(_r0.val[0], _r0n.val[0], 1);
                float32x4_t _r12 = vextq_f32(_r1.val[0], _r1n.val[0], 1);
                float32x4_t _r22 = vextq_f32(_r2.val[0], _r2n.val[0], 1);
                _max0 = vmaxq_f32(_max0, _r02);
                _max1 = vmaxq_f32(_max1, _r12);
                _max2 = vmaxq_f32(_max2, _r22);
                // Reduce the three row maxima to the 3x3 window maximum.
                float32x4_t _max = vmaxq_f32(vmaxq_f32(_max0, _max1), _max2);
                vst1q_f32(outptr, _max);
                _r0 = _r0n;
                _r1 = _r1n;
                _r2 = _r2n;
                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr += 4;
            }
#else
            // ARMv7 inline-asm version of the same 4-wide loop: q0/q1, q2/q3,
            // q4/q5 hold the deinterleaved even/odd lanes of r0/r1/r2; q6..q11
            // hold the preloaded next block, copied back via vorr at loop end.
            if (nn > 0)
            {
            asm volatile(
                "pld [%1, #256] \n"
                "vld2.f32 {d0-d3}, [%1]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
                "pld [%2, #256] \n"
                "vld2.f32 {d4-d7}, [%2]! \n"
                "pld [%3, #256] \n"
                "vld2.f32 {d8-d11}, [%3]! \n"
                "0: \n"
                "pld [%1, #256] \n"
                "vld2.f32 {d12-d15}, [%1]! \n"// q6 = 8 10 12 14 q7 = 9 11 13 15
                "vmax.f32 q12, q0, q1 \n"
                "vmax.f32 q13, q2, q3 \n"
                "pld [%2, #256] \n"
                "vld2.f32 {d16-d19}, [%2]! \n"
                "vmax.f32 q14, q4, q5 \n"
                "vext.32 q0, q0, q6, #1 \n"
                "pld [%3, #256] \n"
                "vld2.f32 {d20-d23}, [%3]! \n"
                "vext.32 q2, q2, q8, #1 \n"
                "vmax.f32 q12, q12, q0 \n"
                "vext.32 q4, q4, q10, #1 \n"
                "vmax.f32 q13, q13, q2 \n"
                "vmax.f32 q14, q14, q4 \n"
                "vmax.f32 q12, q12, q13 \n"
                "vorr q0, q6, q6 \n"
                "vorr q1, q7, q7 \n"
                "vmax.f32 q12, q12, q14 \n"
                "vorr q2, q8, q8 \n"
                "vorr q3, q9, q9 \n"
                "vorr q4, q10, q10 \n"
                "vorr q5, q11, q11 \n"
                "subs %0, #1 \n"
                "vst1.f32 {d24-d25}, [%4]! \n"
                "bne 0b \n"
                "sub %1, #32 \n"// rewind the over-advanced row pointers
                "sub %2, #32 \n"
                "sub %3, #32 \n"
                : "=r"(nn), // %0
                  "=r"(r0), // %1
                  "=r"(r1), // %2
                  "=r"(r2), // %3
                  "=r"(outptr) // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: plain 3x3 window max, horizontal stride 2.
            for (; remain>0; remain--)
            {
                float max0 = std::max(std::max(r0[0], r0[1]), r0[2]);
                float max1 = std::max(std::max(r1[0], r1[1]), r1[2]);
                float max2 = std::max(std::max(r2[0], r2[1]), r2[2]);

                *outptr = std::max(std::max(max0, max1), max2);

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;//1 + w;
            r1 += tailstep;//1 + w;
            r2 += tailstep;//1 + w;
        }
    }
}
serialized.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
// OMPT regression test: an if(0)-style (serialized) task inside a master
// region of a 2-thread parallel region.  The FileCheck directives below are
// the test oracle — they must match the tool-callback trace exactly, so do
// not edit them without rerunning the test.
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
#include <math.h>

int main() {
  omp_set_nested(0);
  print_frame(0);
#pragma omp parallel num_threads(2)
  {
    print_frame_from_outlined_fn(1);
    print_ids(0);
    print_ids(1);
    print_frame(0);
#pragma omp master
    {
      print_ids(0);
      void *creator_frame = get_frame_address(0);
      // t is 0 at runtime ((int)sin(0.1)), but opaque to the compiler, so
      // the task below is serialized without being compile-time removable.
      int t = (int)sin(0.1);
#pragma omp task if (t)
      {
        void *task_frame = get_frame_address(0);
        if (creator_frame == task_frame) {
          // Assume this code was inlined which the compiler is allowed to do.
          print_frame(0);
        } else {
          // The exit frame must be our parent!
          print_frame_from_outlined_fn(1);
        }
        print_ids(0);
        print_ids(1);
        print_ids(2);
      }
      print_fuzzy_address(1);
      print_ids(0);
    }
    print_ids(0);
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback
  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: new_task_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create
  // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]]
  // CHECK-SAME: parent_task_frame.reenter=[[NULL]]
  // CHECK-SAME: new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no

  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)
  // CHECK-SAME: =[[MAIN_REENTER:0x[0-f]+]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin
  // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]]
  // CHECK-SAME: parent_task_frame.exit=[[NULL]]
  // CHECK-SAME: parent_task_frame.reenter=[[MAIN_REENTER]]
  // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2
  // CHECK-SAME: codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}}

  // nested parallel masters
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]

  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address
  // CHECK-SAME: =[[EXIT:0x[0-f]+]]

  // CHECK: {{^}}[[MASTER_ID]]: task level 0
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]]

  // CHECK: {{^}}[[MASTER_ID]]: task level 1
  // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]]
  // CHECK-SAME: task_id=[[PARENT_TASK_ID]],
  // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]

  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: parent_task_frame.exit=[[EXIT]]
  // CHECK-SAME: parent_task_frame.reenter=[[REENTER]]
  // CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule:
  // CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]

  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address
  // CHECK-SAME: =[[TASK_EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]]
  // CHECK-SAME: exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]

  // CHECK: {{^}}[[MASTER_ID]]: task level 1
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[REENTER]]

  // CHECK: {{^}}[[MASTER_ID]]: task level 2
  // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // CHECK-SAME: task_id=[[PARENT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule
  // CHECK-SAME: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]

  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  // CHECK: {{^}}[[MASTER_ID]]: task level 0
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]]

  // implicit barrier parallel
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]

  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address
  // CHECK-SAME: =[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1
  // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // CHECK-SAME: task_id=[[PARENT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // parallel_id is 0 because the region ended in the barrier!
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
mpi_fieldaligned.h
#pragma once

#include "fieldaligned.h"
#include "../backend/grid.h"
#include "../backend/mpi_evaluation.h"
#include "../backend/mpi_matrix.h"
#include "../backend/mpi_matrix_blas.h"
#include "../backend/mpi_collective.h"
#include "../backend/mpi_grid.h"
#include "../backend/interpolation.cuh"
#include "../backend/functions.h"
#include "../runge_kutta.h"

namespace dg{

///@cond
/**
 * @brief Class to shift values in the z - direction
 *
 * Wraps a single MPI_Sendrecv along dimension 2 of a cartesian
 * communicator; device containers are staged through host buffers.
 */
struct ZShifter
{
    ZShifter(){}
    /**
     * @brief Constructor
     *
     * @param size number of elements to exchange between processes
     * @param comm the communicator (cartesian)
     */
    ZShifter( int size, MPI_Comm comm)
    {
        number_ = size;
        comm_ = comm;
        sb_.resize( number_), rb_.resize( number_);
    }
    int number() const {return number_;}
    int size() const {return number_;}
    MPI_Comm communicator() const {return comm_;}
    //host and device versions
    // (transfer copies the possibly-device container into the host
    //  staging buffers before/after the actual MPI exchange)
    template<class container>
    void sendForward( const container& sb, container& rb)
    {
        dg::blas1::transfer( sb, sb_);
        sendForward_( sb_, rb_);
        dg::blas1::transfer( rb_, rb);
    }
    template<class container>
    void sendBackward( const container& sb, container& rb)
    {
        dg::blas1::transfer( sb, sb_);
        sendBackward_( sb_, rb_);
        dg::blas1::transfer( rb_, rb);
    }
    private:
    void sendForward_( HVec& sb, HVec& rb)const //send to next plane
    {
        int source, dest;
        MPI_Status status;
        // shift by +1 in dim 2; tag 9 pairs the two directions apart
        MPI_Cart_shift( comm_, 2, +1, &source, &dest);
        MPI_Sendrecv(   sb.data(), number_, MPI_DOUBLE,  //sender
                        dest, 9,  //destination
                        rb.data(), number_, MPI_DOUBLE, //receiver
                        source, 9, //source
                        comm_, &status);
    }
    void sendBackward_( HVec& sb, HVec& rb)const //send to previous plane
    {
        int source, dest;
        MPI_Status status;
        MPI_Cart_shift( comm_, 2, -1, &source, &dest);
        MPI_Sendrecv(   sb.data(), number_, MPI_DOUBLE,  //sender
                        dest, 3,  //destination
                        rb.data(), number_, MPI_DOUBLE, //receiver
                        source, 3, //source
                        comm_, &status);
    }
    typedef thrust::host_vector<double> HVec;
    HVec sb_, rb_;
    int number_; //deepness, dimensions
    MPI_Comm comm_;
};
///@endcond

/**
 * @brief Class for the evaluation of a parallel derivative (MPI Version)
 *
 * @ingroup utilities
 * @tparam LocalMatrix The matrix class of the interpolation matrix
 * @tparam Communicator The communicator used to exchange data in the RZ planes
 * @tparam LocalContainer The container-class to on which the interpolation matrix operates on (does not need to be dg::HVec)
 */
template <class Geometry, class LocalMatrix, class Communicator, class LocalContainer>
struct MPI_FieldAligned
{
    /**
     * @brief Construct from a field and a grid
     *
     * @tparam Field The Fieldlines to be integrated: Has to provide void operator()( const std::vector<dg::HVec>&, std::vector<dg::HVec>&) where the first index is R, the second Z and the last s (the length of the field line)
     * @tparam Limiter Class that can be evaluated on a 2d grid, returns 1 if there is a limiter and 0 if there isn't. If a field line crosses the limiter in the plane \f$ \phi=0\f$ then the limiter boundary conditions apply.
     * @param field The field to integrate
     * @param grid The grid on which to operate
     * @param eps Desired accuracy of runge kutta
     * @param limit Instance of the limiter class (Default is a limiter everywhere, note that if bcz is periodic it doesn't matter if there is a limiter or not)
     * @param globalbcz Choose NEU or DIR. Defines BC in parallel on box
     * @param deltaPhi Is either <0 (then it's ignored), may differ from hz() only if Nz() == 1
     * @note If there is a limiter, the boundary condition is set by the bcz variable from the grid and can be changed by the set_boundaries function. If there is no limiter the boundary condition is periodic.
     */
    template <class Field, class Limiter>
    MPI_FieldAligned(Field field, Geometry grid, double eps = 1e-4, Limiter limit = DefaultLimiter(), dg::bc globalbcz = dg::DIR, double deltaPhi = -1 );

    /**
     * @brief Set boundary conditions
     *
     * if Dirichlet boundaries are used the left value is the left function value, if Neumann boundaries are used the left value is the left derivative value
     * @param bcz boundary condition
     * @param left left boundary value
     * @param right right boundary value
     */
    void set_boundaries( dg::bc bcz, double left, double right)
    {
        bcz_ = bcz;
        const dg::Grid2d g2d( g_.x0(), g_.x1(), g_.y0(), g_.y1(), g_.n(), g_.Nx(), g_.Ny());
        left_ = dg::evaluate( dg::CONSTANT(left), g2d);
        right_ = dg::evaluate( dg::CONSTANT(right),g2d);
    }

    /**
     * @brief Set boundary conditions
     *
     * if Dirichlet boundaries are used the left value is the left function value, if Neumann boundaries are used the left value is the left derivative value
     * @param bcz boundary condition
     * @param left left boundary value
     * @param right right boundary value
     */
    void set_boundaries( dg::bc bcz, const MPI_Vector<LocalContainer>& left, const MPI_Vector<LocalContainer>& right)
    {
        bcz_ = bcz;
        left_ = left.data();
        right_ = right.data();
    }

    /**
     * @brief Set boundary conditions in the limiter region
     *
     * if Dirichlet boundaries are used the left value is the left function value, if Neumann boundaries are used the left value is the left derivative value
     * @param bcz boundary condition
     * @param global 3D vector containing boundary values
     * @param scal_left left scaling factor
     * @param scal_right right scaling factor
     */
    void set_boundaries( dg::bc bcz, const MPI_Vector<LocalContainer>& global, double scal_left, double scal_right)
    {
        bcz_ = bcz;
        unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny();
        // only the processes owning the global z-boundaries copy a plane
        if( g_.z0() == g_.global().z0())
        {
            cView left( global.data().cbegin(), global.data().cbegin() + size);
            View leftView( left_.begin(), left_.end());
            cusp::copy( left, leftView);
            dg::blas1::scal( left_, scal_left);
        }
        if( g_.z1() == g_.global().z1())
        {
            cView right( global.data().cbegin()+(g_.Nz()-1)*size, global.data().cbegin() + g_.Nz()*size);
            View rightView( right_.begin(), right_.end());
            cusp::copy( right, rightView);
            dg::blas1::scal( right_, scal_right);
        }
    }

    /**
     * @brief Evaluate a 2d functor and transform to all planes along the fieldlines
     *
     * Evaluates the given functor on a 2d plane and then follows fieldlines to
     * get the values in the 3rd dimension. Uses the grid given in the constructor.
     * @tparam BinaryOp Binary Functor
     * @param f Functor to evaluate
     * @param plane The number of the plane to start
     *
     * @return Returns an instance of container
     */
    template< class BinaryOp>
    MPI_Vector<LocalContainer> evaluate( BinaryOp f, unsigned plane=0) const;

    /**
     * @brief Evaluate a 2d functor and transform to all planes along the fieldlines
     *
     * Evaluates the given functor on a 2d plane and then follows fieldlines to
     * get the values in the 3rd dimension. Uses the grid given in the constructor.
     * The second functor is used to scale the values along the fieldlines.
     * The fieldlines are assumed to be periodic.
     * @tparam BinaryOp Binary Functor
     * @tparam UnaryOp Unary Functor
     * @param f Functor to evaluate in x-y
     * @param g Functor to evaluate in z
     * @param p0 The number of the plane to start
     * @param rounds The number of rounds to follow a fieldline
     *
     * @return Returns an instance of container
     */
    template< class BinaryOp, class UnaryOp>
    MPI_Vector<LocalContainer> evaluate( BinaryOp f, UnaryOp g, unsigned p0, unsigned rounds) const;

    /**
     * @brief Applies the interpolation to the next planes
     *
     * @param in input
     * @param out output may not equal intpu
     */
    void einsPlus( const MPI_Vector<LocalContainer>& in, MPI_Vector<LocalContainer>& out);
    /**
     * @brief Applies the interpolation to the previous planes
     *
     * @param in input
     * @param out output may not equal intpu
     */
    void einsMinus( const MPI_Vector<LocalContainer>& in, MPI_Vector<LocalContainer>& out);
    /**
     * @brief Applies the transposed interpolation to the previous plane
     *
     * @param in input
     * @param out output may not equal intpu
     */
    void einsPlusT( const MPI_Vector<LocalContainer>& in, MPI_Vector<LocalContainer>& out);
    /**
     * @brief Applies the transposed interpolation to the next plane
     *
     * @param in input
     * @param out output may not equal intpu
     */
    void einsMinusT( const MPI_Vector<LocalContainer>& in, MPI_Vector<LocalContainer>& out);

    /**
     * @brief hz is the distance between the plus and minus planes
     *
     * @return three-dimensional vector
     */
    const MPI_Vector<LocalContainer>& hz()const {return hz_;}
    /**
     * @brief hp is the distance between the plus and current planes
     *
     * @return three-dimensional vector
     */
    const MPI_Vector<LocalContainer>& hp()const {return hp_;}
    /**
     * @brief hm is the distance between the current and minus planes
     *
     * @return three-dimensional vector
     */
    const MPI_Vector<LocalContainer>& hm()const {return hm_;}
    /**
     * @brief Access the underlying grid
     *
     * @return the grid
     */
    const Geometry& grid() const{return g_;}
    private:
    typedef cusp::array1d_view< typename LocalContainer::iterator> View;
    typedef cusp::array1d_view< typename LocalContainer::const_iterator> cView;
    MPI_Vector<LocalContainer> hz_, hp_, hm_;
    LocalContainer ghostM, ghostP;
    Geometry g_;
    dg::bc bcz_;
    LocalContainer left_, right_;
    LocalContainer limiter_;
    std::vector<LocalContainer> tempXYplus_, tempXYminus_, temp_; // per-plane scratch
    LocalContainer tempZ_;
    Communicator commXYplus_, commXYminus_;
    ZShifter commZ_;
    LocalMatrix plus, minus; //interpolation matrices
    LocalMatrix plusT, minusT; //interpolation matrices
};
///@cond
//////////////////////////////////////DEFINITIONS/////////////////////////////////////
// Constructor: integrates the fieldlines from every local grid node by
// +/- deltaPhi, builds the plus/minus interpolation matrices on the
// resulting points and the XY/Z communication patterns.
template<class MPIGeometry, class LocalMatrix, class CommunicatorXY, class LocalContainer>
template <class Field, class Limiter>
MPI_FieldAligned<MPIGeometry, LocalMatrix, CommunicatorXY, LocalContainer>::MPI_FieldAligned(Field field, MPIGeometry grid, double eps, Limiter limit, dg::bc globalbcz, double deltaPhi ):
    hz_( dg::evaluate( dg::zero, grid)), hp_( hz_), hm_( hz_),
    g_(grid), bcz_(grid.bcz()),
    tempXYplus_(g_.Nz()), tempXYminus_(g_.Nz()), temp_(g_.Nz())
{
    //create communicator with all processes in plane
    typename MPIGeometry::perpendicular_grid g2d = grid.perp_grid();
    unsigned localsize = g2d.size();
    limiter_ = dg::evaluate( limit, g2d.local());
    right_ = left_ = dg::evaluate( zero, g2d.local());
    ghostM.resize( localsize); ghostP.resize( localsize);
    //set up grid points as start for fieldline integrations
    std::vector<MPI_Vector<thrust::host_vector<double> > > y( 5, dg::evaluate(dg::zero, g2d));
    y[0] = dg::evaluate( dg::cooX2d, g2d);
    y[1] = dg::evaluate( dg::cooY2d, g2d);
    y[2] = dg::evaluate( dg::zero, g2d);//distance (not angle)
    y[3] = dg::pullback( dg::cooX2d, g2d);
    y[4] = dg::pullback( dg::cooY2d, g2d);
    //integrate to next z-planes
    std::vector<thrust::host_vector<double> > yp(3, y[0].data()), ym(yp);
    if(deltaPhi<=0) deltaPhi = grid.hz();
    else assert( g_.Nz() == 1 || grid.hz()==deltaPhi);
#ifdef _OPENMP
#pragma omp parallel for shared(field)
#endif //_OPENMP
    for( unsigned i=0; i<localsize; i++)
    {
        // one fieldline integration forward and one backward per node
        thrust::host_vector<double> coords(5), coordsP(5), coordsM(5);
        coords[0] = y[0].data()[i], coords[1] = y[1].data()[i], coords[2] = y[2].data()[i], coords[3] = y[3].data()[i], coords[4] = y[4].data()[i];
        double phi1 = deltaPhi;
        boxintegrator( field, g2d.global(), coords, coordsP, phi1, eps, globalbcz);
        phi1 = -deltaPhi;
        boxintegrator( field, g2d.global(), coords, coordsM, phi1, eps, globalbcz);
        yp[0][i] = coordsP[0], yp[1][i] = coordsP[1], yp[2][i] = coordsP[2];
        ym[0][i] = coordsM[0], ym[1][i] = coordsM[1], ym[2][i] = coordsM[2];
    }
    //determine pid of result
    thrust::host_vector<int> pids( localsize);
    for( unsigned i=0; i<localsize; i++)
    {
        pids[i] = g2d.pidOf( yp[0][i], yp[1][i]);
        if( pids[i]  == -1)
        {
            std::cerr << "ERROR: PID NOT FOUND!\n";
            return;
        }
    }
    CommunicatorXY cp( pids, g2d.communicator());
    commXYplus_ = cp;
    thrust::host_vector<double> pX, pY;
    dg::blas1::transfer( cp.collect( yp[0]), pX);
    dg::blas1::transfer( cp.collect( yp[1]), pY);
    //construt interpolation matrix
    plus = dg::create::interpolation( pX, pY, g2d.local(), globalbcz); //inner points hopefully never lie exactly on local boundary
    cusp::transpose( plus, plusT);
    //do the same for the minus z-plane
    for( unsigned i=0; i<pids.size(); i++)
    {
        pids[i] = g2d.pidOf( ym[0][i], ym[1][i]);
        if( pids[i] == -1)
        {
            std::cerr << "ERROR: PID NOT FOUND!\n";
            return;
        }
    }
    CommunicatorXY cm( pids, g2d.communicator());
    commXYminus_ = cm;
    dg::blas1::transfer( cm.collect( ym[0]), pX);
    dg::blas1::transfer( cm.collect( ym[1]), pY);
    minus = dg::create::interpolation( pX, pY, g2d.local(), globalbcz); //inner points hopefully never lie exactly on local boundary
    cusp::transpose( minus, minusT);
    //copy to device
    // (the integrated distances are the same for every z-plane)
    for( unsigned i=0; i<g_.Nz(); i++)
    {
        thrust::copy( yp[2].begin(), yp[2].end(), hp_.data().begin() + i*localsize);
        thrust::copy( ym[2].begin(), ym[2].end(), hm_.data().begin() + i*localsize);
    }
    dg::blas1::scal( hm_, -1.);
    dg::blas1::axpby(  1., hp_, +1., hm_, hz_);
    for( unsigned i=0; i<g_.Nz(); i++)
    {
        tempXYplus_[i].resize( commXYplus_.size());
        tempXYminus_[i].resize( commXYminus_.size());
        temp_[i].resize( localsize);
    }
    commZ_ = ZShifter( localsize, g_.communicator() );
    tempZ_.resize( commZ_.size());
}

template<class G, class M, class C, class container>
template< class BinaryOp>
MPI_Vector<container> MPI_FieldAligned<G,M,C,container>::evaluate( BinaryOp binary, unsigned p0) const
{
    // delegate to the general version with no z-scaling and no rounds
    return evaluate( binary, dg::CONSTANT(1), p0, 0);
}

template<class G, class M, class C, class container>
template< class BinaryOp, class UnaryOp>
MPI_Vector<container> MPI_FieldAligned<G,M,C, container>::evaluate( BinaryOp binary, UnaryOp unary, unsigned p0, unsigned rounds) const
{
    //idea: simply apply I+/I- enough times on the init2d vector to get the result in each plane
    //unary function is always such that the p0 plane is at x=0
    assert( g_.Nz() > 1);
    assert( p0 < g_.global().Nz());
    const typename G::perpendicular_grid g2d = g_.perp_grid();
    MPI_Vector<container> init2d = dg::pullback( binary, g2d);
    container temp(init2d.data()), tempP(init2d.data()), tempM(init2d.data());
    MPI_Vector<container> vec3d = dg::evaluate( dg::zero, g_);
    std::vector<container>  plus2d( g_.global().Nz(), (container)dg::evaluate(dg::zero, g2d.local()) ), minus2d( plus2d), result( plus2d);
    container tXYplus( tempXYplus_[0]), tXYminus( tempXYminus_[0]);
    unsigned turns = rounds;
    if( turns ==0) turns++;
    //first apply Interpolation many times, scale and store results
    int dims[3], periods[3], coords[3];
    MPI_Cart_get( g_.communicator(), 3, dims, periods, coords);
    int sizeXY = dims[0]*dims[1]; // ==1 means no perpendicular communication needed
    for( unsigned r=0; r<turns; r++)
        for( unsigned i0=0; i0<g_.global().Nz(); i0++)
        {
            dg::blas1::copy( init2d.data(), tempP);
            dg::blas1::copy( init2d.data(), tempM);
            unsigned rep = i0 + r*g_.global().Nz();
            for(unsigned k=0; k<rep; k++)
            {
                if( sizeXY != 1){
                    dg::blas2::symv( plus, tempP, tXYplus);
                    commXYplus_.send_and_reduce( tXYplus, temp);
                }
                else
                    dg::blas2::symv( plus, tempP, temp);
                temp.swap( tempP);
                if( sizeXY != 1){
                    dg::blas2::symv( minus, tempM, tXYminus);
                    commXYminus_.send_and_reduce( tXYminus, temp);
                }
                else
                    dg::blas2::symv( minus, tempM, temp);
                temp.swap( tempM);
            }
            dg::blas1::scal( tempP, unary(  (double)rep*g_.hz() ) );
            dg::blas1::scal( tempM, unary( -(double)rep*g_.hz() ) );
            dg::blas1::axpby( 1., tempP, 1., plus2d[i0]);
            dg::blas1::axpby( 1., tempM, 1., minus2d[i0]);
        }
    //now we have the plus and the minus filaments
    if( rounds == 0) //there is a limiter
    {
        for( unsigned i0=0; i0<g_.Nz(); i0++)
        {
            int idx = (int)(i0+coords[2]*g_.Nz())  - (int)p0;
            if(idx>=0)
                result[i0] = plus2d[idx];
            else
                result[i0] = minus2d[abs(idx)];
            thrust::copy( result[i0].begin(), result[i0].end(), vec3d.data().begin() + i0*g2d.size());
        }
    }
    else //sum up plus2d and minus2d
    {
        for( unsigned i0=0; i0<g_.global().Nz(); i0++)
        {
            //int idx = (int)(i0+coords[2]*g_.Nz());
            unsigned revi0 = (g_.global().Nz() - i0)%g_.global().Nz(); //reverted index
            dg::blas1::axpby( 1., plus2d[i0], 0., result[i0]);
            dg::blas1::axpby( 1., minus2d[revi0], 1., result[i0]);
        }
        // the start plane was added twice (once per direction)
        dg::blas1::axpby( -1., init2d.data(), 1., result[0]);
        for(unsigned i0=0; i0<g_.Nz(); i0++)
        {
            int idx = ((int)i0 + coords[2]*g_.Nz() -(int)p0 + g_.global().Nz())%g_.global().Nz(); //shift index
            thrust::copy( result[idx].begin(), result[idx].end(), vec3d.data().begin() + i0*g2d.size());
        }
    }
    return vec3d;
}

// Apply the plus-interpolation: out[i0] receives the interpolated plane
// i0+1 (periodic wrap locally, halo exchange via commZ_ across processes),
// then ghost cells enforce bcz_ on the global upper z-boundary.
template<class G, class M, class C, class container>
void MPI_FieldAligned<G,M,C, container>::einsPlus( const MPI_Vector<container>& f, MPI_Vector<container>& fplus )
{
    //dg::blas2::detail::doSymv( plus, f, fplus, MPIMatrixTag(), MPIVectorTag(), MPIVectorTag());
    const container& in = f.data();
    container& out = fplus.data();
    int size2d = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    int dims[3], periods[3], coords[3];
    MPI_Cart_get( g_.communicator(), 3, dims, periods, coords);
    int sizeXY = dims[0]*dims[1];
    int sizeZ = dims[2];
    //1. compute 2d interpolation in every plane and store in temp_
    if( sizeXY != 1) //communication needed
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            cView inV( in.cbegin() + i0*plus.num_cols, in.cbegin() + (i0+1)*plus.num_cols);
            View tempV( tempXYplus_[i0].begin(), tempXYplus_[i0].end() );
            cusp::multiply( plus, inV, tempV);
            //exchange data in XY
            commXYplus_.send_and_reduce( tempXYplus_[i0], temp_[i0]);
        }
    }
    else //directly compute in temp_
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            cView inV( in.cbegin() + i0*plus.num_cols, in.cbegin() + (i0+1)*plus.num_cols);
            View tempV( temp_[i0].begin(), temp_[i0].end() );
            cusp::multiply( plus, inV, tempV);
        }
    }
    //2. reorder results and communicate halo in z
    for( int i0=0; i0<(int)g_.Nz(); i0++)
    {
        int ip = i0 + 1;
        if( ip > (int)g_.Nz()-1) ip -= (int)g_.Nz();
        thrust::copy( temp_[ip].begin(), temp_[ip].begin() + size2d, out.begin() + i0*size2d);
    }
    if( sizeZ != 1)
    {
        commZ_.sendBackward( temp_[0], tempZ_);
        thrust::copy( tempZ_.begin(), tempZ_.end(), out.begin() + (g_.Nz()-1)*size2d);
    }
    //make ghostcells in last plane
    unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    if( bcz_ != dg::PER && g_.z1() == g_.global().z1())
    {
        unsigned i0 = g_.Nz()-1, im = g_.Nz()-2, ip = 0;
        cView fp( in.cbegin() + ip*size, in.cbegin() + (ip+1)*size);
        cView f0( in.cbegin() + i0*size, in.cbegin() + (i0+1)*size);
        cView fm( in.cbegin() + im*size, in.cbegin() + (im+1)*size);
        View outV( out.begin() + i0*size, out.begin() + (i0+1)*size);
        View ghostPV( ghostP.begin(), ghostP.end());
        View ghostMV( ghostM.begin(), ghostM.end());
        //overwrite out
        cusp::copy( f0, ghostPV);
        if( bcz_ == dg::DIR || bcz_ == dg::NEU_DIR)
        {
            dg::blas1::axpby( 2., right_, -1, ghostP);
        }
        if( bcz_ == dg::NEU || bcz_ == dg::DIR_NEU)
        {
            //note that hp_ is 3d and the rest 2d
            thrust::transform( right_.begin(), right_.end(),  hp_.data().begin(), ghostM.begin(), thrust::multiplies<double>());
            dg::blas1::axpby( 1., ghostM, 1., ghostP);
        }
        // blend ghost values into out only where the limiter is active
        cusp::blas::axpby(  ghostPV,  outV, ghostPV, 1.,-1.);
        dg::blas1::pointwiseDot( limiter_, ghostP, ghostP);
        cusp::blas::axpby(  ghostPV,  outV, outV, 1.,1.);
    }
}

// Mirror image of einsPlus: out[i0] receives plane i0-1 and ghost cells
// enforce bcz_ on the global lower z-boundary.
template<class G,class M, class C, class container>
void MPI_FieldAligned<G,M,C,container>::einsMinus( const MPI_Vector<container>& f, MPI_Vector<container>& fminus )
{
    const container& in = f.data();
    container& out = fminus.data();
    //dg::blas2::detail::doSymv( minus, f, fminus, MPIMatrixTag(), MPIVectorTag(), MPIVectorTag());
    int size2d = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    int dims[3], periods[3], coords[3];
    MPI_Cart_get( g_.communicator(), 3, dims, periods, coords);
    int sizeXY = dims[0]*dims[1];
    int sizeZ = dims[2];
    //1. compute 2d interpolation in every plane and store in temp_
    if( sizeXY != 1) //communication needed
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            cView inV( in.cbegin() + i0*minus.num_cols, in.cbegin() + (i0+1)*minus.num_cols);
            View tempV( tempXYminus_[i0].begin(), tempXYminus_[i0].end());
            cusp::multiply( minus, inV, tempV);
            //exchange data in XY
            commXYminus_.send_and_reduce( tempXYminus_[i0], temp_[i0]);
        }
    }
    else //directly compute in temp_
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            cView inV( in.cbegin() + i0*minus.num_cols, in.cbegin() + (i0+1)*minus.num_cols);
            View tempV( temp_[i0].begin(), temp_[i0].end() );
            cusp::multiply( minus, inV, tempV);
        }
    }
    //2. reorder results and communicate halo in z
    for( int i0=0; i0<(int)g_.Nz(); i0++)
    {
        int ip = i0 -1;
        if( ip < 0) ip += (int)g_.Nz();
        thrust::copy( temp_[ip].begin(), temp_[ip].end(), out.begin() + i0*size2d);
    }
    if( sizeZ != 1)
    {
        commZ_.sendForward( temp_[g_.Nz()-1], tempZ_);
        thrust::copy( tempZ_.begin(), tempZ_.end(), out.begin());
    }
    //make ghostcells in first plane
    unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    if( bcz_ != dg::PER && g_.z0() == g_.global().z0())
    {
        unsigned i0 = 0, im = g_.Nz()-1, ip = 1;
        cView fp( in.cbegin() + ip*size, in.cbegin() + (ip+1)*size);
        cView f0( in.cbegin() + i0*size, in.cbegin() + (i0+1)*size);
        cView fm( in.cbegin() + im*size, in.cbegin() + (im+1)*size);
        View outV( out.begin() + i0*size, out.begin() + (i0+1)*size);
        View ghostPV( ghostP.begin(), ghostP.end());
        View ghostMV( ghostM.begin(), ghostM.end());
        //overwrite out
        cusp::copy( f0, ghostMV);
        if( bcz_ == dg::DIR || bcz_ == dg::DIR_NEU)
        {
            dg::blas1::axpby( 2., left_, -1, ghostM);
        }
        if( bcz_ == dg::NEU || bcz_ == dg::NEU_DIR)
        {
            thrust::transform( left_.begin(), left_.end(),  hm_.data().begin(), ghostP.begin(), thrust::multiplies<double>());
            dg::blas1::axpby( -1, ghostP, 1., ghostM);
        }
        // blend ghost values into out only where the limiter is active
        cusp::blas::axpby(  ghostMV,  outV, ghostMV, 1.,-1.);
        dg::blas1::pointwiseDot( limiter_, ghostM, ghostM);
        cusp::blas::axpby(  ghostMV,  outV, outV, 1.,1.);
    }
}

// Transposed minus-interpolation: collect (instead of send_and_reduce)
// gathers the needed points before the multiply; halo/ghost handling
// matches einsPlus (global upper z-boundary).
template< class G, class M, class C, class container>
void MPI_FieldAligned<G,M,C,container>::einsMinusT( const MPI_Vector<container>& f, MPI_Vector<container>& fpe)
{
    //dg::blas2::detail::doSymv( minusT, f, fpe, MPIMatrixTag(), MPIVectorTag(), MPIVectorTag());
    const container& in = f.data();
    container& out = fpe.data();
    int size2d = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    int dims[3], periods[3], coords[3];
    MPI_Cart_get( g_.communicator(), 3, dims, periods, coords);
    int sizeXY = dims[0]*dims[1];
    int sizeZ = dims[2];
    //1. compute 2d interpolation in every plane and store in temp_
    if( sizeXY != 1) //communication needed
    {
        //first exchange data in XY
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            thrust::copy( in.cbegin() + i0*size2d, in.cbegin() + (i0+1)*size2d, temp_[i0].begin());
            tempXYminus_[i0] = commXYminus_.collect( temp_[i0] );
            cView inV( tempXYminus_[i0].cbegin(), tempXYminus_[i0].cend() );
            View tempV( temp_[i0].begin(), temp_[i0].end() );
            cusp::multiply( minusT, inV, tempV);
        }
    }
    else //directly compute in temp_
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            cView inV( in.cbegin() + i0*minusT.num_cols, in.cbegin() + (i0+1)*minusT.num_cols);
            View tempV( temp_[i0].begin() , temp_[i0].end() );
            cusp::multiply( minusT, inV, tempV);
        }
    }
    //2. reorder results and communicate halo in z
    for( int i0=0; i0<(int)g_.Nz(); i0++)
    {
        int ip = i0 + 1;
        if( ip > (int)g_.Nz()-1) ip -= (int)g_.Nz();
        thrust::copy( temp_[ip].begin(), temp_[ip].end(), out.begin() + i0*size2d);
    }
    if( sizeZ != 1)
    {
        commZ_.sendBackward( temp_[0], tempZ_);
        thrust::copy( tempZ_.begin(), tempZ_.end(), out.begin() + (g_.Nz()-1)*size2d);
    }
    //make ghostcells in last plane
    unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    if( bcz_ != dg::PER && g_.z1() == g_.global().z1())
    {
        unsigned i0 = g_.Nz()-1, im = g_.Nz()-2, ip = 0;
        cView fp( in.cbegin() + ip*size, in.cbegin() + (ip+1)*size);
        cView f0( in.cbegin() + i0*size, in.cbegin() + (i0+1)*size);
        cView fm( in.cbegin() + im*size, in.cbegin() + (im+1)*size);
        View outV( out.begin() + i0*size, out.begin() + (i0+1)*size);
        View ghostPV( ghostP.begin(), ghostP.end());
        View ghostMV( ghostM.begin(), ghostM.end());
        //overwrite out
        cusp::copy( f0, ghostPV);
        if( bcz_ == dg::DIR || bcz_ == dg::NEU_DIR)
        {
            dg::blas1::axpby( 2., right_, -1, ghostP);
        }
        if( bcz_ == dg::NEU || bcz_ == dg::DIR_NEU)
        {
            //note that hp_ is 3d and the rest 2d
            thrust::transform( right_.begin(), right_.end(),  hp_.data().begin(), ghostM.begin(), thrust::multiplies<double>());
            dg::blas1::axpby( 1., ghostM, 1., ghostP);
        }
        cusp::blas::axpby(  ghostPV,  outV, ghostPV, 1.,-1.);
        dg::blas1::pointwiseDot( limiter_, ghostP, ghostP);
        cusp::blas::axpby(  ghostPV,  outV, outV, 1.,1.);
    }
}

// Transposed plus-interpolation; halo/ghost handling matches einsMinus
// (global lower z-boundary).
template< class G,class M, class C, class container>
void MPI_FieldAligned<G,M,C,container>::einsPlusT( const MPI_Vector<container>& f, MPI_Vector<container>& fme)
{
    //dg::blas2::detail::doSymv( plusT, f, fme, MPIMatrixTag(), MPIVectorTag(), MPIVectorTag());
    const container& in = f.data();
    container& out = fme.data();
    int size2d = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    int dims[3], periods[3], coords[3];
    MPI_Cart_get( g_.communicator(), 3, dims, periods, coords);
    int sizeXY = dims[0]*dims[1];
    int sizeZ = dims[2];
    //1. compute 2d interpolation in every plane and store in temp_
    if( sizeXY != 1) //communication needed
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            //first exchange data in XY
            thrust::copy( in.cbegin() + i0*size2d, in.cbegin() + (i0+1)*size2d, temp_[i0].begin());
            tempXYplus_[i0] = commXYplus_.collect( temp_[i0]);
            cView inV( tempXYplus_[i0].cbegin(), tempXYplus_[i0].cend() );
            View tempV( temp_[i0].begin(), temp_[i0].end() );
            cusp::multiply( plusT, inV, tempV);
        }
    }
    else //directly compute in temp_
    {
        for( int i0=0; i0<(int)g_.Nz(); i0++)
        {
            cView inV( in.cbegin() + i0*plus.num_cols, in.cbegin() + (i0+1)*plus.num_cols);
            View tempV( temp_[i0].begin(), temp_[i0].end());
            cusp::multiply( plusT, inV, tempV);
        }
    }
    //2. reorder results and communicate halo in z
    for( int i0=0; i0<(int)g_.Nz(); i0++)
    {
        int ip = i0 - 1;
        if( ip < 0 ) ip += (int)g_.Nz();
        thrust::copy( temp_[ip].begin(), temp_[ip].end(), out.begin() + i0*size2d);
    }
    if( sizeZ != 1)
    {
        commZ_.sendForward( temp_[g_.Nz()-1], tempZ_);
        thrust::copy( tempZ_.begin(), tempZ_.end(), out.begin());
    }
    //make ghostcells in first plane
    unsigned size = g_.n()*g_.n()*g_.Nx()*g_.Ny();
    if( bcz_ != dg::PER && g_.z0() == g_.global().z0())
    {
        unsigned i0 = 0, im = g_.Nz()-1, ip = 1;
        cView fp( in.cbegin() + ip*size, in.cbegin() + (ip+1)*size);
        cView f0( in.cbegin() + i0*size, in.cbegin() + (i0+1)*size);
        cView fm( in.cbegin() + im*size, in.cbegin() + (im+1)*size);
        View outV( out.begin() + i0*size, out.begin() + (i0+1)*size);
        View ghostPV( ghostP.begin(), ghostP.end());
        View ghostMV( ghostM.begin(), ghostM.end());
        //overwrite out
        cusp::copy( f0, ghostMV);
        if( bcz_ == dg::DIR || bcz_ == dg::DIR_NEU)
        {
            dg::blas1::axpby( 2., left_, -1, ghostM);
        }
        if( bcz_ == dg::NEU || bcz_ == dg::NEU_DIR)
        {
            thrust::transform( left_.begin(), left_.end(),  hm_.data().begin(), ghostP.begin(), thrust::multiplies<double>());
            dg::blas1::axpby( -1, ghostP, 1., ghostM);
        }
        cusp::blas::axpby(  ghostMV,  outV, ghostMV, 1.,-1.);
        dg::blas1::pointwiseDot( limiter_, ghostM, ghostM);
        cusp::blas::axpby(  ghostMV,  outV, outV, 1.,1.);
    }
}
///@endcond

}//namespace dg
/* sse.h */
/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2017-2020 Evan Nemerson <evan@nemerson.com>
 *   2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
 *   2015      Brandon Rowlett <browlett@nvidia.com>
 *   2015      Ken Fast <kfast@gdeb.com>
 */

#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H

#include "mmx.h"

#if defined(_WIN32)
#include <windows.h>
#endif

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

/* Private representation of a 128-bit SSE register: a union of every
 * lane interpretation plus, where available, the native vector type of
 * the target ISA.  All members are 16-byte aligned. */
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
  /* GNU-style vector extensions: members are subscriptable vectors. */
  SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
  SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
  SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
  SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
  /* Fallback: plain arrays with the same layout. */
  SIMDE_ALIGN_TO_16 int8_t i8[16];
  SIMDE_ALIGN_TO_16 int16_t i16[8];
  SIMDE_ALIGN_TO_16 int32_t i32[4];
  SIMDE_ALIGN_TO_16 int64_t i64[2];
  SIMDE_ALIGN_TO_16 uint8_t u8[16];
  SIMDE_ALIGN_TO_16 uint16_t u16[8];
  SIMDE_ALIGN_TO_16 uint32_t u32[4];
  SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
  SIMDE_ALIGN_TO_16 simde_int128 i128[1];
  SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
  SIMDE_ALIGN_TO_16 simde_float32 f32[4];
  SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
  SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif

  SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
  SIMDE_ALIGN_TO_16 simde__m64 m64[2];

#if defined(SIMDE_X86_SSE_NATIVE)
  SIMDE_ALIGN_TO_16 __m128 n;     /* native SSE register */
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
  SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
  SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
  SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
  SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
  SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
  SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
  SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
  SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
  SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;

/* Public simde__m128 type: the native vector type when one exists,
 * otherwise the private union itself. */
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif

#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif

/* (static assert continues in the next chunk) */
HEDLEY_STATIC_ASSERT(16 ==
sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif

/* Reinterpret a private union as the public type via memcpy (safe type pun). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
  simde__m128 r;
  simde_memcpy(&r, &v, sizeof(r));
  return r;
}

/* Reinterpret the public type as the private union via memcpy (safe type pun). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
  simde__m128_private r;
  simde_memcpy(&r, &v, sizeof(r));
  return r;
}

/* Generate to/from conversion helpers for each native NEON lane type. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
  #endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */

/* Same for AltiVec lane types. */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)

  #if defined(SIMDE_BUG_GCC_95782)
    /* Workaround for GCC bug 95782: spell out the float conversions by hand. */
    SIMDE_FUNCTION_ATTRIBUTES
    SIMDE_POWER_ALTIVEC_VECTOR(float)
    simde__m128_to_altivec_f32(simde__m128 value) {
      simde__m128_private r_ = simde__m128_to_private(value);
      return r_.altivec_f32;
    }

    SIMDE_FUNCTION_ATTRIBUTES
    simde__m128
    simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
      simde__m128_private r_;
      r_.altivec_f32 = value;
      return simde__m128_from_private(r_);
    }
  #else
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
  #endif

  #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
    SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
  #endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */

/* MXCSR rounding-mode constants: native values when SSE is available,
 * otherwise the standard MXCSR bit patterns (bits 13-14). */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
  SIMDE_MM_ROUND_NEAREST     = _MM_ROUND_NEAREST,
  SIMDE_MM_ROUND_DOWN        = _MM_ROUND_DOWN,
  SIMDE_MM_ROUND_UP          = _MM_ROUND_UP,
  SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
  SIMDE_MM_ROUND_NEAREST     = 0x0000,
  SIMDE_MM_ROUND_DOWN        = 0x2000,
  SIMDE_MM_ROUND_UP          = 0x4000,
  SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};

/* SSE4.1 rounding-control flags: reuse the native macros when present. */
#if defined(_MM_FROUND_TO_NEAREST_INT)
#  define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
#  define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
#  define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
#  define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
#  define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
#  define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
#  define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
#  define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
/* (directive continues in the next chunk) */
#define
SIMDE_MM_FROUND_TO_NEG_INF 0x01 #define SIMDE_MM_FROUND_TO_POS_INF 0x02 #define SIMDE_MM_FROUND_TO_ZERO 0x03 #define SIMDE_MM_FROUND_CUR_DIRECTION 0x04 #define SIMDE_MM_FROUND_RAISE_EXC 0x00 #define SIMDE_MM_FROUND_NO_EXC 0x08 #endif #define SIMDE_MM_FROUND_NINT (SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_FLOOR (SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_CEIL (SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_TRUNC (SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_RINT (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_NEARBYINT (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC) #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT) #define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT #define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF #define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF #define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO #define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION #define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC #define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT #define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR #define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL #define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC #define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT #define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT #endif SIMDE_FUNCTION_ATTRIBUTES unsigned int SIMDE_MM_GET_ROUNDING_MODE(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _MM_GET_ROUNDING_MODE(); #elif defined(SIMDE_HAVE_FENV_H) unsigned int vfe_mode; switch (fegetround()) { #if defined(FE_TONEAREST) case FE_TONEAREST: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; #endif #if defined(FE_TOWARDZERO) case FE_TOWARDZERO: vfe_mode = SIMDE_MM_ROUND_DOWN; break; #endif #if defined(FE_UPWARD) case FE_UPWARD: vfe_mode = SIMDE_MM_ROUND_UP; break; #endif #if 
defined(FE_DOWNWARD) case FE_DOWNWARD: vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; break; #endif default: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; } return vfe_mode; #else return SIMDE_MM_ROUND_NEAREST; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE() #endif SIMDE_FUNCTION_ATTRIBUTES void SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) { #if defined(SIMDE_X86_SSE_NATIVE) _MM_SET_ROUNDING_MODE(a); #elif defined(SIMDE_HAVE_FENV_H) int fe_mode = FE_TONEAREST; switch (a) { #if defined(FE_TONEAREST) case SIMDE_MM_ROUND_NEAREST: fe_mode = FE_TONEAREST; break; #endif #if defined(FE_TOWARDZERO) case SIMDE_MM_ROUND_TOWARD_ZERO: fe_mode = FE_TOWARDZERO; break; #endif #if defined(FE_DOWNWARD) case SIMDE_MM_ROUND_DOWN: fe_mode = FE_DOWNWARD; break; #endif #if defined(FE_UPWARD) case SIMDE_MM_ROUND_UP: fe_mode = FE_UPWARD; break; #endif default: return; } fesetround(fe_mode); #else (void)a; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_mm_getcsr(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_getcsr(); #else return SIMDE_MM_GET_ROUNDING_MODE(); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_getcsr() simde_mm_getcsr() #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_setcsr(uint32_t a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_setcsr(a); #else SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a)); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setcsr(a) simde_mm_setcsr(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_round_ps(simde__m128 a, int rounding, int lax_rounding) SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) { simde__m128_private r_, a_ = simde__m128_to_private(a); (void)lax_rounding; /* For architectures which lack a current direction SIMD instruction. 
* * Note that NEON actually has a current rounding mode instruction, * but in ARMv8+ the rounding mode is ignored and nearest is always * used, so we treat ARMv7 as having a rounding mode but ARMv8 as * not. */ #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ARM_NEON_A32V8) if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; #endif switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndiq_f32(a_.neon_f32); #elif defined(simde_math_nearbyintf) SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = simde_math_nearbyintf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEAREST_INT: #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndnq_f32(a_.neon_f32); #elif defined(simde_math_roundevenf) SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = simde_math_roundevenf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndmq_f32(a_.neon_f32); #elif defined(simde_math_floorf) SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = simde_math_floorf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case 
SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndpq_f32(a_.neon_f32); #elif defined(simde_math_ceilf) SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = simde_math_ceilf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_ZERO: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) r_.neon_f32 = vrndq_f32(a_.neon_f32); #elif defined(simde_math_truncf) SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = simde_math_truncf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; default: HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); } return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) #define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding)) #else #define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps(simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps(e3, e2, e1, e0); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN_TO_16 simde_float32 data[4] = {e0, e1, e2, e3}; r_.neon_f32 = vld1q_f32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3); #else r_.f32[0] = e0; r_.f32[1] = e1; r_.f32[2] = e2; r_.f32[3] = e3; #endif return simde__m128_from_private(r_); #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps1(simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps1(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) (void)a; return vec_splats(a); #else return simde_mm_set_ps(a, a, a, a); #endif } #define simde_mm_set1_ps(a) simde_mm_set_ps1(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_set_ps1(a) simde_mm_set_ps1(a) #define _mm_set1_ps(a) simde_mm_set1_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_move_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_move_ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3); #else r_.f32[0] = b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_move_ss(a, b) simde_mm_move_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 + b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = a_.f32[i] + b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_add_ps(a, b) simde_mm_add_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_add_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0); float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0); // the upper values in the result must be the remnants of <a>. 
r_.neon_f32 = vaddq_f32(a_.neon_f32, value); #else r_.f32[0] = a_.f32[0] + b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_add_ss(a, b) simde_mm_add_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_and_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_and_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 & b_.i32; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { r_.i32[i] = a_.i32[i] & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_and_ps(a, b) simde_mm_and_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_andnot_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_andnot_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32 & b_.i32; #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { r_.i32[i] = ~(a_.i32[i]) & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } 
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif

/* Bitwise XOR of the full 128 bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_xor_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32f = a_.i32f ^ b_.i32f;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
        r_.u32[i] = a_.u32[i] ^ b_.u32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif

/* Bitwise OR of the full 128 bits. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_or_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32f = a_.i32f | b_.i32f;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
        r_.u32[i] = a_.u32[i] | b_.u32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif

/* Bitwise NOT of every bit in <a> (SIMDe extension — no SSE equivalent). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
  #if defined(SIMDE_X86_SSE2_NATIVE)
    /* Note: we use ints instead of floats because we don't want cmpeq
     * to return false for (NaN, NaN) */
    __m128i ai = _mm_castps_si128(a);
    return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vmvnq_s32(a_.neon_i32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = ~a_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
        r_.i32[i] = ~(a_.i32[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
  /* This function is for when you want to blend two elements together
   * according to a mask. It is similar to _mm_blendv_ps, except that
   * it is undefined whether the blend is based on the highest bit in
   * each lane (like blendv) or just bitwise operations. This allows
   * us to implement the function efficiently everywhere.
   *
   * Basically, you promise that all the lanes in mask are either 0 or
   * ~0. */
  #if defined(SIMDE_X86_SSE4_1_NATIVE)
    return _mm_blendv_ps(a, b, mask);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b),
      mask_ = simde__m128_to_private(mask);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* Branch-free select: a ^ ((a ^ b) & mask). */
      r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
        r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}

/* Rounded average of unsigned 16-bit lanes: (a + b + 1) >> 1. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16(simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_avg_pu16(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
      /* Widen to 32 bits so the +1 rounding bias cannot overflow. */
      uint32_t wa SIMDE_VECTOR(16);
      uint32_t wb SIMDE_VECTOR(16);
      uint32_t wr SIMDE_VECTOR(16);
      SIMDE_CONVERT_VECTOR_(wa, a_.u16);
      SIMDE_CONVERT_VECTOR_(wb, b_.u16);
      wr = (wa + wb + 1) >> 1;
      SIMDE_CONVERT_VECTOR_(r_.u16, wr);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
        r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
  #define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8(simde__m64 a, simde__m64 b) {
  /* Rounded average of unsigned 8-bit lanes: (a + b + 1) >> 1. */
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_avg_pu8(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
      /* Widen to 16 bits so the +1 rounding bias cannot overflow. */
      uint16_t wa SIMDE_VECTOR(16);
      uint16_t wb SIMDE_VECTOR(16);
      uint16_t wr SIMDE_VECTOR(16);
      SIMDE_CONVERT_VECTOR_(wa, a_.u8);
      SIMDE_CONVERT_VECTOR_(wb, b_.u8);
      wr = (wa + wb + 1) >> 1;
      SIMDE_CONVERT_VECTOR_(r_.u8, wr);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
        r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
  #define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif

/* Lane-wise absolute value of four f32 lanes (SIMDe extension). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
  #if defined(SIMDE_X86_AVX512F_NATIVE) && (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7, 1, 0))
    return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vabsq_f32(a_.neon_f32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_abs(a_.altivec_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.f32[i] = simde_math_fabsf(a_.f32[i]);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}

/* Lane-wise ==; a true lane is all-ones (~0), a false lane all-zeros. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpeq_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif

/* Scalar ==: lane 0 holds the comparison mask; upper lanes copy <a>. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpeq_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] == b_.f32[0]) ?
~UINT32_C(0) : UINT32_C(0);
    /* Pass the upper three lanes of <a> through untouched. */
    SIMDE_VECTORIZE
    for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif

/* Lane-wise >= mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpge_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif

/* Scalar >= (PGI's _mm_cmpge_ss is avoided; see !defined(__PGI)). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpge_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif

/* Lane-wise > mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpgt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif

/* Scalar > (native path skipped on PGI). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpgt_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif

/* Lane-wise <= mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmple_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif

/* Scalar <=. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmple_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif

/* Lane-wise < mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmplt_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif

/* Scalar <. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmplt_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif

/* Lane-wise != mask. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpneq_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* No vceq "not equal" on NEON: compute eq, then complement. */
      r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
      /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float),
         SIMDE_POWER_ALTIVEC_VECTOR(float)) is missing from XL C/C++ v16.1.1,
         though the documentation (table 89 on page 432 of the IBM XL C/C++
         for Linux Compiler Reference, Version 16.1.1) shows that it should
         be present.  Both GCC and clang support it. */
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
    #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
      /* Older POWER: eq then bitwise complement via vec_nor. */
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (a_.f32[i] != b_.f32[i]) ?
~UINT32_C(0) : UINT32_C(0);
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif

/* Scalar !=: lane 0 holds the mask; upper lanes copy <a>. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpneq_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.u32[i] = a_.u32[i];
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif

/* The cmpn* ("not ...") family below is expressed through the inverse
 * ordered comparison (e.g. "not >=" as "<").
 * NOTE(review): the native x86 cmpn* predicates are true for unordered
 * (NaN) operands, while these inverses return false in that case —
 * confirm against the upstream implementation before relying on NaN
 * behavior here. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps(simde__m128 a, simde__m128 b) {
  return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss(simde__m128 a, simde__m128 b) {
  return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps(simde__m128 a, simde__m128 b) {
  return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss(simde__m128 a, simde__m128 b) {
  return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps(simde__m128 a, simde__m128 b) {
  return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss(simde__m128 a, simde__m128 b) {
  return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps(simde__m128 a, simde__m128 b) {
  return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss(simde__m128 a, simde__m128 b) {
  return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif

/* Ordered compare: a lane is all-ones iff neither operand lane is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Note: NEON does not have ordered compare builtin
         Need to compare a eq a and b eq b to check for NaN
         Do AND of results to get final */
      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif

/* Unordered compare: a lane is all-ones iff either operand lane is NaN. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpunord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
    #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      /* Pre-P8 has no vec_nand: AND then complement with vec_nor. */
      r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
      r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif

/* Scalar unordered compare; upper lanes copy <a>. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpunord_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(simde_math_isnanf)
      r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
      SIMDE_VECTORIZE
      for (size_t i = 1; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
        r_.u32[i] = a_.u32[i];
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif

/* comi* family: scalar comparisons of lane 0 returning int 0/1.
 * The NEON paths first build not-NaN masks so the result of the compare
 * matches x86 behavior on unordered operands. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comieq_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
    #else
      return a_.f32[0] == b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comige_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
    #else
      return a_.f32[0] >= b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comigt_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
    #else
      return a_.f32[0] > b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comile_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
    #else
      return a_.f32[0] <= b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define
_mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss(simde__m128 a, simde__m128 b) {
  /* Scalar <: returns 1/0; the NEON path treats NaN operands per x86 comi semantics. */
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comilt_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
      uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
      return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
    #else
      return a_.f32[0] < b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss(simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_comineq_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
      uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
      uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
      return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
    #else
      return a_.f32[0] != b_.f32[0];
    #endif
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif

/* Copy the sign bit of each lane of <src> onto <dest> (SIMDe extension). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
  simde__m128_private
    r_,
    dest_ = simde__m128_to_private(dest),
    src_ = simde__m128_to_private(src);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* -0.0f is a mask with only the sign bit set in each lane. */
    const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
    r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
    r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
  #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
    /* XL C/C++ implements vec_cpsgn with swapped operands. */
    #if !defined(HEDLEY_IBM_VERSION)
      r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
    #else
      r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
    #endif
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
    r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
  #elif defined(SIMDE_IEEE754_STORAGE)
    (void)src_;
    (void)dest_;
    simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
    r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
      r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
    }
  #endif

  return simde__m128_from_private(r_);
}

/* Flip the sign of each lane of <dest> where <src> is negative. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
  return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}

/* Convert two i32 values from <b> into the low f32 lanes; high lanes copy <a>. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps(simde__m128 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvt_pi2ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);
    simde__m64_private b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
      r_.m64_private[1] = a_.m64_private[1];
    #else
      r_.f32[0] = (simde_float32)b_.i32[0];
      r_.f32[1] = (simde_float32)b_.i32[1];
      r_.i32[2] = a_.i32[2];
      r_.i32[3] = a_.i32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif

/* Convert the two low f32 lanes to i32, rounding per the current mode. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi(simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvt_ps2pi(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_;

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
      r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
      a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
      SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
    #else
      a_ = simde__m128_to_private(a);
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
        r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif

/* Insert int32 <b> as f32 into lane 0; upper lanes copy <a>. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss(simde__m128 a, int32_t b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvt_si2ss(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
    #else
      r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
      r_.i32[1] = a_.i32[1];
      r_.i32[2] = a_.i32[2];
      r_.i32[3] = a_.i32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif

/* Convert lane 0 to int32 using the current rounding mode. */
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si(simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cvt_ss2si(a);
  #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
    return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
  #else
    simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
    return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif

/* Widen four signed 16-bit integers to f32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps(simde__m64 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi16_ps(a);
  #else
    simde__m128_private r_;
    simde__m64_private a_ = simde__m64_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
        simde_float32 v = a_.i16[i];
        r_.f32[i] = v;
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif

/* Same operation as simde_mm_cvt_pi2ps (legacy MMX name). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps(simde__m128 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi32_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);
    simde__m64_private b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
      r_.m64_private[1] = a_.m64_private[1];
    #else
      r_.f32[0] = (simde_float32)b_.i32[0];
      r_.f32[1] = (simde_float32)b_.i32[1];
      r_.i32[2] = a_.i32[2];
      r_.i32[3] = a_.i32[3];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif

/* Convert two pairs of i32 (<a> low half, <b> high half) to four f32 lanes. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps(simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi32x2_ps(a, b);
  #else
    simde__m128_private r_;
    simde__m64_private
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
    #elif defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
      SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
    #else
      r_.f32[0] = (simde_float32)a_.i32[0];
      r_.f32[1] = (simde_float32)a_.i32[1];
      r_.f32[2] = (simde_float32)b_.i32[0];
      r_.f32[3] = (simde_float32)b_.i32[1];
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif

/* Widen the low four signed 8-bit integers of <a> to f32. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps(simde__m64 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtpi8_ps(a);
  #else
    simde__m128_private r_;
    simde__m64_private a_ = simde__m64_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
    #else
      r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
      r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
      r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
      r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  #define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif

/* Narrow four f32 lanes to four i16 lanes, rounding to nearest.
 * (Function continues past the end of this chunk.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16(simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_cvtps_pi16(a);
  #else
    simde__m64_private r_;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
      r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
        r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
      }
    #endif
return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi32(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi32(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi8(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi8(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471) /* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to * i16, combine with an all-zero vector of i16 (which will become the upper * half), narrow to i8. 
*/ float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)); float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)); float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min)); r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0))); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(a_.f32) / sizeof(a_.f32[0])); i++) { if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)) r_.i8[i] = INT8_MAX; else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)) r_.i8[i] = INT8_MIN; else r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i])); } /* Note: the upper half is undefined */ #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu16_ps(simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = (simde_float32)a_.u16[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu8_ps(simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8)))); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / 
sizeof(r_.f32[0])); i++) { r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi32_ss(simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtsi32_ss(a, b); #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi64_ss(simde__m128 a, int64_t b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtsi64_ss(a, b); #else return _mm_cvtsi64x_ss(a, b); #endif #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde_float32 simde_mm_cvtss_f32(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtss_f32(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_f32(a_.neon_f32, 0); #else return a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtss_si32(simde__m128 a) { return 
simde_mm_cvt_ss2si(a); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvtss_si64(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtss_si64(a); #else return _mm_cvtss_si64x(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0))); #else return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtt_ps2pi(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]); } #endif return simde__m64_from_private(r_); #endif } #define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a)) #define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtt_ss2si(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtt_ss2si(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif #endif } #define simde_mm_cvttss_si32(a) 
simde_mm_cvtt_ss2si((a)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a)) #define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvttss_si64(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER) #if defined(__PGI) return _mm_cvttss_si64x(a); #else return _mm_cvttss_si64(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? 
UINT32_C(0) : ~UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip0 = vrecpeq_f32(b_.neon_f32); float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32)); r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = a_.f32[i] / b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_div_ps(a, b) simde_mm_div_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_div_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = a_.f32[0] / b_.f32[0]; 
SIMDE_VECTORIZE for (size_t i = 1; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_div_ss(a, b) simde_mm_div_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int16_t simde_mm_extract_pi16(simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private a_ = simde__m64_to_private(a); return a_.i16[imm8]; } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) #if HEDLEY_HAS_WARNING("-Wvector-conversion") /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ #define simde_mm_extract_pi16(a, imm8) \ (HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) HEDLEY_DIAGNOSTIC_POP) #else #define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) #endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) #endif #define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8)) #define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_insert_pi16(simde__m64 a, int16_t i, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private r_, a_ = simde__m64_to_private(a); r_.i64[0] = a_.i64[0]; r_.i16[imm8] = i; return simde__m64_from_private(r_); } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) #if HEDLEY_HAS_WARNING("-Wvector-conversion") /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ #define ssimde_mm_insert_pi16(a, i, imm8) \ (HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic ignored \"-Wvector-conversion\"")( \ _mm_insert_pi16((a), (i), (imm8))) 
HEDLEY_DIAGNOSTIC_POP) #else #define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8) #endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) #define simde_mm_insert_pi16(a, i, imm8) \ simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) #endif #define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) #define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps(simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_ld(0, mem_addr); #else simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load1_ps(simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps1(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_dup_f32(mem_addr); #else r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr)); #endif return simde__m128_from_private(r_); #endif } #define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) #define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ss(simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return 
_mm_load_ss(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0); #else r_.f32[0] = *mem_addr; r_.i32[1] = 0; r_.i32[2] = 0; r_.i32[3] = 0; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadh_pi(simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr))); #else simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr); r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*)(mem_addr)) #endif /* The SSE documentation says that there are no alignment requirements for mem_addr. Unfortunately they used the __m64 type for the argument which is supposed to be 8-byte aligned, so some compilers (like clang with -Wcast-align) will generate a warning if you try to cast, say, a simde_float32* to a simde__m64* for this function. I think the choice of argument type is unfortunate, but I do think we need to stick to it here. 
If there is demand I can always add something like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */ SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadl_pi(simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32)); #else simde__m64_private b_; simde_memcpy(&b_, mem_addr, sizeof(b_)); r_.i32[0] = b_.i32[0]; r_.i32[1] = b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*)(mem_addr)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadr_ps(simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadr_ps(mem_addr); #else simde__m128_private r_, v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr)); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrev64q_f32(v_.neon_f32); r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_reve(v_.altivec_f32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); #else r_.f32[0] = v_.f32[3]; r_.f32[1] = v_.f32[2]; r_.f32[2] = v_.f32[1]; r_.f32[3] = v_.f32[0]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadu_ps(simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadu_ps(mem_addr); #else simde__m128_private r_; #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_load(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #else simde_memcpy(&r_, mem_addr, sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskmove_si64(simde__m64 a, simde__m64 mask, int8_t* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) _mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr)); #else simde__m64_private a_ = simde__m64_to_private(a), mask_ = simde__m64_to_private(mask); SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(a_.i8) / sizeof(a_.i8[0])); i++) if (mask_.i8[i] < 0) mem_addr[i] = a_.i8[i]; #endif } #define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_maskmove_si64(a, mask, mem_addr) \ simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) #define _m_maskmovq(a, mask, mem_addr) \ simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pi16(simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) { r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? 
a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b) #define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS) r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? 
a_.f32[i] : b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_max_ps(a, b) simde_mm_max_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pu8(simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) { r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b) #define _m_pmaxub(a, b) simde_mm_max_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_max_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(maxq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_max_ss(a, b) simde_mm_max_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pi16(simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) { r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b) #define _m_pminsw(a, b) simde_mm_min_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ps(a, b); #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) return simde__m128_from_neon_f32( vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128); #else r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128)); #endif return simde__m128_from_private(r_); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); #else r_.altivec_f32 = vec_sel(b_.altivec_f32, 
a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); #endif return simde__m128_from_private(r_); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) simde__m128 mask = simde_mm_cmplt_ps(a, b); return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_min_ps(a, b) simde_mm_min_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pu8(simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) { r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminub(a, b) simde_mm_min_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b) #define _m_pminub(a, b) simde_mm_min_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_min_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_min_ss(a, b) simde_mm_min_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movehl_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movehl_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a32 = vget_high_f32(a_.neon_f32); float32x2_t b32 = vget_high_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(b32, a32); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergel(b_.altivec_i64, a_.altivec_i64)); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3); #else r_.f32[0] = b_.f32[2]; r_.f32[1] = b_.f32[3]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movelh_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movelh_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a10 = vget_low_f32(a_.neon_f32); float32x2_t b10 = vget_low_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(a10, b10); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergeh(a_.altivec_i64, b_.altivec_i64)); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return 
simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_pi8(simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_pi8(a); #else simde__m64_private a_ = simde__m64_to_private(a); int r = 0; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) uint8x8_t input = a_.neon_u8; const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0}; const uint8x8_t mask_and = vdup_n_u8(0x80); const int8x8_t mask_shift = vld1_s8(xr); const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift); uint8x8_t lo = mask_result; r = vaddv_u8(lo); #else const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]); SIMDE_VECTORIZE_REDUCTION(| : r) for (size_t i = 0; i < nmemb; i++) { r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i); } #endif return r; #endif } #define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a) #define _m_pmovmskb(a) simde_mm_movemask_pi8(a) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_ps(a); #else int r = 0; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) static const int32_t shift_amount[] = {0, 1, 2, 3}; const int32x4_t shift = vld1q_s32(shift_amount); uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31); return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift))); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) // Shift out everything but the sign bits with a 32-bit unsigned shift right. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31)); // Merge the two pairs together with a 64-bit unsigned shift right + add. uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31)); // Extract the result. 
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
  /* Portable fallback: gather each lane's sign bit into bit i of r. */
  SIMDE_VECTORIZE_REDUCTION(| : r)
  for (size_t i = 0; i < sizeof(a_.u32) / sizeof(a_.u32[0]); i++) {
    /* Shift the lane's top (sign) bit down to bit 0, then place it at bit i. */
    r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
  }
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif

/* Element-wise single-precision multiply: r[i] = a[i] * b[i].
 * Dispatches to the best available backend (native SSE, NEON, WASM SIMD,
 * GCC-style vector extensions, AltiVec) with a scalar loop as last resort. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_mul_ps(simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_mul_ps(a, b);
#else
  simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
  r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
  SIMDE_VECTORIZE
  for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
    r_.f32[i] = a_.f32[i] * b_.f32[i];
  }
#endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif

/* Scalar single-precision multiply: r[0] = a[0] * b[0]; lanes 1..3 are
 * passed through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_mul_ss(simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  /* Reuse the packed multiply, then keep only lane 0 of the product. */
  return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
  simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  r_.f32[0] = a_.f32[0] * b_.f32[0];
  r_.f32[1] = a_.f32[1];
  r_.f32[2] = a_.f32[2];
  r_.f32[3] = a_.f32[3];
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif

/* High half of an unsigned 16-bit multiply: r[i] = (a[i] * b[i]) >> 16. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_mulhi_pu16(simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16); const uint32x4_t t2 = vshrq_n_u32(t1, 16); const uint16x4_t t3 = vmovn_u32(t2); r_.neon_u16 = t3; #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) { r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16))); } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b) #define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #endif #define SIMDE_MM_HINT_NTA 0 #define SIMDE_MM_HINT_T0 1 #define SIMDE_MM_HINT_T1 2 #define SIMDE_MM_HINT_T2 3 #define SIMDE_MM_HINT_ENTA 4 #define SIMDE_MM_HINT_ET0 5 #define SIMDE_MM_HINT_ET1 6 #define SIMDE_MM_HINT_ET2 7 #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) HEDLEY_DIAGNOSTIC_PUSH #if HEDLEY_HAS_WARNING("-Wreserved-id-macro") _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #endif #undef _MM_HINT_NTA #define _MM_HINT_NTA SIMDE_MM_HINT_NTA #undef _MM_HINT_T0 #define _MM_HINT_T0 SIMDE_MM_HINT_T0 #undef _MM_HINT_T1 #define _MM_HINT_T1 SIMDE_MM_HINT_T1 #undef _MM_HINT_T2 #define _MM_HINT_T2 SIMDE_MM_HINT_T2 #undef _MM_HINT_ETNA #define _MM_HINT_ETNA SIMDE_MM_HINT_ETNA #undef _MM_HINT_ET0 #define _MM_HINT_ET0 SIMDE_MM_HINT_ET0 #undef _MM_HINT_ET1 #define _MM_HINT_ET1 SIMDE_MM_HINT_ET1 #undef _MM_HINT_ET1 #define _MM_HINT_ET2 SIMDE_MM_HINT_ET2 HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_prefetch(char const* p, int i) { #if defined(HEDLEY_GCC_VERSION) __builtin_prefetch(p); #else (void)p; #endif (void)i; } #if defined(SIMDE_X86_SSE_NATIVE) #if defined(__clang__) && \ !SIMDE_DETECT_CLANG_VERSION_CHECK(10, 0, 0) /* 
https://reviews.llvm.org/D71718 */ #define simde_mm_prefetch(p, i) \ (__extension__({ \ HEDLEY_DIAGNOSTIC_PUSH \ HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ _mm_prefetch((p), (i)); \ HEDLEY_DIAGNOSTIC_POP \ })) #else #define simde_mm_prefetch(p, i) _mm_prefetch(p, i) #endif #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_prefetch(p, i) simde_mm_prefetch(p, i) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_negate_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8, 1, 0)) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_VECTOR_NEGATE) r_.f32 = -a_.f32; #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = -a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip = vrecpeq_f32(a_.neon_f32); #if SIMDE_ACCURACY_PREFERENCE > 0 for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE; ++i) { recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32)); } #endif r_.neon_f32 = recip; #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_re(a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.f32 = 1.0f / a_.f32; #elif 
defined(SIMDE_IEEE754_STORAGE) /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { int32_t ix; simde_float32 fx = a_.f32[i]; simde_memcpy(&ix, &fx, sizeof(ix)); int32_t x = INT32_C(0x7EF311C3) - ix; simde_float32 temp; simde_memcpy(&temp, &x, sizeof(temp)); r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); } #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = 1.0f / a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_rcp_ps(a) simde_mm_rcp_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ss(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rcp_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); r_.f32[0] = 1.0f / a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_rcp_ss(a) simde_mm_rcp_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrsqrteq_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_rsqrte(a_.altivec_f32); #elif defined(SIMDE_IEEE754_STORAGE) /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf Pages 100 - 103 */ SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1); #else simde_float32 x = a_.f32[i]; 
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[i] = x; #endif } #elif defined(simde_math_sqrtf) SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ss(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_IEEE754_STORAGE) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1); #else simde_float32 x = a_.f32[0]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[0] = x; #endif } r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #elif defined(simde_math_sqrtf) r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = 
a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_sad_pu8(simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_sad_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8)); uint16_t r0 = t[0] + t[1] + t[2] + t[3]; r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0); #else uint16_t sum = 0; #if defined(SIMDE_HAVE_STDLIB_H) SIMDE_VECTORIZE_REDUCTION(+ : sum) for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) { sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i])); } r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum); r_.i16[1] = 0; r_.i16[2] = 0; r_.i16[3] = 0; #else HEDLEY_UNREACHABLE(); #endif #endif return simde__m64_from_private(r_); #endif } #define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b) #define _m_psadbw(a, b) simde_mm_sad_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ss(simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ss(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0); #else return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_set_ss(a) simde_mm_set_ss(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setr_ps(simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setr_ps(e3, e2, e1, e0); #else return simde_mm_set_ps(e0, e1, e2, e3); #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setzero_ps(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setzero_ps(); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(SIMDE_FLOAT32_C(0.0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_splats(SIMDE_FLOAT32_C(0.0)); #else simde__m128 r; simde_memset(&r, 0, sizeof(r)); return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setzero_ps() simde_mm_setzero_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_undefined_ps(void) { simde__m128_private r_; #if defined(SIMDE_HAVE_UNDEFINED128) r_.n = _mm_undefined_ps(); #elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) r_ = simde__m128_to_private(simde_mm_setzero_ps()); #endif return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_undefined_ps() simde_mm_undefined_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_setone_ps(void) { simde__m128 t = simde_mm_setzero_ps(); return simde_mm_cmpeq_ps(t, t); } SIMDE_FUNCTION_ATTRIBUTES void simde_mm_sfence(void) { /* TODO: Use Hedley. 
*/ #if defined(SIMDE_X86_SSE_NATIVE) _mm_sfence(); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) __atomic_thread_fence(__ATOMIC_SEQ_CST); #elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ !defined(__STDC_NO_ATOMICS__) #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9) __atomic_thread_fence(__ATOMIC_SEQ_CST); #else atomic_thread_fence(memory_order_seq_cst); #endif #elif defined(_MSC_VER) MemoryBarrier(); #elif HEDLEY_HAS_EXTENSION(c_atomic) __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) __sync_synchronize(); #elif defined(_OPENMP) #pragma omp critical(simde_mm_sfence_) { } #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_sfence() simde_mm_sfence() #endif #define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w) #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) #define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shuffle_pi16(a, imm8) \ (__extension__({ \ const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \ simde__m64_from_private((simde__m64_private){ \ .i16 = SIMDE_SHUFFLE_VECTOR_(16, 8, (simde__tmp_a_).i16, (simde__tmp_a_).i16, \ (((imm8)) & 3), (((imm8) >> 2) & 3), (((imm8) >> 4) & 3), \ (((imm8) >> 6) & 3))}); \ })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_shuffle_pi16(simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m64_private r_; simde__m64_private a_ = simde__m64_to_private(a); for (size_t i = 0; i < sizeof(r_.i16) / sizeof(r_.i16[0]); i++) { r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3]; } HEDLEY_DIAGNOSTIC_PUSH #if 
HEDLEY_HAS_WARNING("-Wconditional-uninitialized") #pragma clang diagnostic ignored "-Wconditional-uninitialized" #endif return simde__m64_from_private(r_); HEDLEY_DIAGNOSTIC_POP } #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) #define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8) #else #define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8) #define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) #define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8) #elif defined(SIMDE_SHUFFLE_VECTOR_) #define simde_mm_shuffle_ps(a, b, imm8) \ (__extension__({ \ simde__m128_from_private((simde__m128_private){ \ .f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, simde__m128_to_private(a).f32, \ simde__m128_to_private(b).f32, (((imm8)) & 3), \ (((imm8) >> 2) & 3), (((imm8) >> 4) & 3) + 4, \ (((imm8) >> 6) & 3) + 4)}); \ })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_shuffle_ps(simde__m128 a, simde__m128 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[(imm8 >> 0) & 3]; r_.f32[1] = a_.f32[(imm8 >> 2) & 3]; r_.f32[2] = b_.f32[(imm8 >> 4) & 3]; r_.f32[3] = b_.f32[(imm8 >> 6) & 3]; return simde__m128_from_private(r_); } #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vsqrtq_f32(a_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t est = vrsqrteq_f32(a_.neon_f32); for (int i = 0; i <= 
SIMDE_ACCURACY_PREFERENCE; i++) { est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est); } r_.neon_f32 = vmulq_f32(a_.neon_f32, est); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_sqrt(a_.altivec_f32); #elif defined(simde_math_sqrt) SIMDE_VECTORIZE for (size_t i = 0; i < sizeof(r_.f32) / sizeof(r_.f32[0]); i++) { r_.f32[i] = simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ss(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #elif defined(simde_math_sqrtf) r_.f32[0] = simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ps(simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(a_.altivec_f32, 0, mem_addr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr, a_.wasm_v128); #else simde_memcpy(mem_addr, &a_, sizeof(a)); #endif #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_store_ps(mem_addr, a) \ simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store1_ps(simde_float32 mem_addr[4], simde__m128 a) { simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128); #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps1(mem_addr_, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_); #elif defined(SIMDE_SHUFFLE_VECTOR_) simde__m128_private tmp_; tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0); simde_mm_store_ps(mem_addr_, tmp_.f32); #else SIMDE_VECTORIZE_ALIGNED(mem_addr_ : 16) for (size_t i = 0; i < sizeof(a_.f32) / sizeof(a_.f32[0]); i++) { mem_addr_[i] = a_.f32[0]; } #endif #endif } #define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_store_ps1(mem_addr, a) \ simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #define _mm_store1_ps(mem_addr, a) \ simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ss(simde_float32* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ss(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_lane_f32(mem_addr, a_.neon_f32, 0); #else *mem_addr = a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_store_ss(mem_addr, a) \ simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, 
simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeh_pi(simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32)); #else simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storel_pi(simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr); simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) dest_->neon_f32 = vget_low_f32(a_.neon_f32); #else dest_->f32[0] = a_.f32[0]; dest_->f32[1] = a_.f32[1]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storer_ps(simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storer_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) vec_st(vec_reve(a_.altivec_f32), 0, mem_addr); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t tmp = vrev64q_f32(a_.neon_f32); vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2)); #elif defined(SIMDE_SHUFFLE_VECTOR_) a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0); simde_mm_store_ps(mem_addr, simde__m128_from_private(a_)); #else SIMDE_VECTORIZE_ALIGNED(mem_addr : 16) for (size_t i = 0; i < sizeof(a_.f32) / sizeof(a_.f32[0]); i++) { mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) 
- i]; } #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_storer_ps(mem_addr, a) \ simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeu_ps(simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeu_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) vec_vsx_st(a_.altivec_f32, 0, mem_addr); #else simde_memcpy(mem_addr, &a_, sizeof(a_)); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_storeu_ps(mem_addr, a) \ simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sub_ps(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 - b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) { r_.f32[i] = a_.f32[i] - b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sub_ss(simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sub_ps(a, b)); #else simde__m128_private r_, a_ = 
simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  /* Scalar subtract in lane 0; the three upper lanes pass through from a,
     matching _mm_sub_ss semantics. */
  r_.f32[0] = a_.f32[0] - b_.f32[0];
  r_.f32[1] = a_.f32[1];
  r_.f32[2] = a_.f32[2];
  r_.f32[3] = a_.f32[3];
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif

/* Unordered scalar compare (lane 0): equal.
   Returns nonzero when a[0] == b[0].  The "u" (unordered) variants are the
   non-signaling compares, so the portable fallbacks take care not to leave
   FP-exception state behind. */
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomieq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomieq_ss(a, b);
#else
  simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  /* A lane compares unequal to itself only when it is NaN, so vceqq(x, x)
     yields a "not NaN" mask.  ucomieq returns true when either input is NaN
     (unordered) OR the values are equal, hence the OR below. */
  uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
  uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
  uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
  uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
  r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
  /* Save the FP environment (feholdexcept also installs non-stop mode), do
     the compare, then restore so the compare cannot leave exception flags
     set — keeping the non-signaling contract of the ucomi* family. */
  fenv_t envp;
  int x = feholdexcept(&envp);
  r = a_.f32[0] == b_.f32[0];
  if (HEDLEY_LIKELY(x == 0))
    fesetenv(&envp);
#else
  r = a_.f32[0] == b_.f32[0];
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif

/* Unordered scalar compare (lane 0): greater-or-equal.
   Unlike eq/le/lt above/below, ge/gt return false when either input is NaN,
   hence the AND with the "both not NaN" mask in the NEON path. */
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomige_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomige_ss(a, b);
#else
  simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
  uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
  uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
  r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
  /* See ucomieq: hold/restore FP env so the compare is non-signaling. */
  fenv_t envp;
  int x = feholdexcept(&envp);
  r = a_.f32[0] >= b_.f32[0];
  if (HEDLEY_LIKELY(x == 0))
    fesetenv(&envp);
#else
  r = a_.f32[0] >= b_.f32[0];
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif

/* Unordered scalar compare (lane 0): greater-than (false on NaN inputs). */
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomigt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomigt_ss(a, b);
#else
  simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
  uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
  uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
  r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
  fenv_t envp;
  int x = feholdexcept(&envp);
  r = a_.f32[0] > b_.f32[0];
  if (HEDLEY_LIKELY(x == 0))
    fesetenv(&envp);
#else
  r = a_.f32[0] > b_.f32[0];
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif

/* Unordered scalar compare (lane 0): less-or-equal (true on NaN inputs,
   hence the OR with the "either is NaN" mask in the NEON path). */
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomile_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomile_ss(a, b);
#else
  simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
  uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
  uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
  uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
  r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
  fenv_t envp;
  int x = feholdexcept(&envp);
  r = a_.f32[0] <= b_.f32[0];
  if (HEDLEY_LIKELY(x == 0))
    fesetenv(&envp);
#else
  r = a_.f32[0] <= b_.f32[0];
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif

/* Unordered scalar compare (lane 0): less-than (true on NaN inputs). */
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomilt_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomilt_ss(a, b);
#else
  simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
  uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
  uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
  uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
  r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
  fenv_t envp;
  int x = feholdexcept(&envp);
  r = a_.f32[0] < b_.f32[0];
  if (HEDLEY_LIKELY(x == 0))
    fesetenv(&envp);
#else
  r = a_.f32[0] < b_.f32[0];
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif

/* Unordered scalar compare (lane 0): not-equal (false on NaN inputs —
   NaN != NaN is "unordered", which this predicate reports as 0). */
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomineq_ss(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomineq_ss(a, b);
#else
  simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
  int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
  uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
  uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
  r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
  fenv_t envp;
  int x = feholdexcept(&envp);
  r = a_.f32[0] != b_.f32[0];
  if (HEDLEY_LIKELY(x == 0))
    fesetenv(&envp);
#else
  r = a_.f32[0] != b_.f32[0];
#endif
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif

/* Detect availability of __builtin_ia32_undef128 (compiler support for an
   "undefined" __m128 value); continued on the following line. */
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__has_builtin)
#if __has_builtin(__builtin_ia32_undef128)
#define SIMDE_HAVE_UNDEFINED128
#endif
#elif !defined(__PGI) &&
!defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
#define SIMDE_HAVE_UNDEFINED128
#endif
#endif

#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif

/* Interleave the HIGH halves of a and b: {a2, b2, a3, b3}. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_unpackhi_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpackhi_ps(a, b);
#else
  simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  /* AArch32 has no full-width zip2; zip the two high 64-bit halves. */
  float32x2_t a1 = vget_high_f32(a_.neon_f32);
  float32x2_t b1 = vget_high_f32(b_.neon_f32);
  float32x2x2_t result = vzip_f32(a1, b1);
  r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
  r_.f32[0] = a_.f32[2];
  r_.f32[1] = b_.f32[2];
  r_.f32[2] = a_.f32[3];
  r_.f32[3] = b_.f32[3];
#endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif

/* Interleave the LOW halves of a and b: {a0, b0, a1, b1}. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_unpacklo_ps(simde__m128 a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpacklo_ps(a, b);
#else
  simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
  r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  float32x2_t a1 = vget_low_f32(a_.neon_f32);
  float32x2_t b1 = vget_low_f32(b_.neon_f32);
  float32x2x2_t result = vzip_f32(a1, b1);
  r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
  r_.f32[0] = a_.f32[0];
  r_.f32[1] = b_.f32[0];
  r_.f32[2] = a_.f32[1];
  r_.f32[3] = b_.f32[1];
#endif
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif

/* Non-temporal 64-bit store; fallbacks are plain stores (the cache-bypass
   hint is only a hint, so dropping it preserves observable behavior). */
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_pi(simde__m64* mem_addr, simde__m64 a)
{
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  _mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  simde__m64_private *dest = HEDLEY_REINTERPRET_CAST(simde__m64_private *, mem_addr), a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
  dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif

/* Non-temporal 128-bit store of four floats; uses the compiler's
   nontemporal builtin when available, else a regular store. */
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_ps(simde_float32 mem_addr[4], simde__m128 a)
{
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  simde__m128_private a_ = simde__m128_to_private(a);
  __builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
  simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_stream_ps(mem_addr, a) \
  simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* In-place 4x4 transpose of four float rows.  NEON version uses two
   transpose-pairs plus recombination; the generic version is the classic
   unpack/movelh/movehl sequence from Intel's _MM_TRANSPOSE4_PS. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
    float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
    row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), vget_low_f32(ROW23.val[0])); \
    row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), vget_low_f32(ROW23.val[1])); \
    row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), vget_high_f32(ROW23.val[0])); \
    row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), vget_high_f32(ROW23.val[1])); \
  } while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    simde__m128 tmp3, tmp2, tmp1, tmp0; \
    tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
    tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
    tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
    tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
    row0 = simde_mm_movelh_ps(tmp0, tmp2); \
    row1 = simde_mm_movehl_ps(tmp2, tmp0); \
    row2 = simde_mm_movelh_ps(tmp1, tmp3); \
    row3 = simde_mm_movehl_ps(tmp3, tmp1); \
  } while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif

/* MXCSR exception-flag bits: use the native _MM_* values when <xmmintrin.h>
   provides them, otherwise the architectural constants. */
#if defined(_MM_EXCEPT_INVALID)
#define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
#define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
#define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
#define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
#define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
#define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
#define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
#define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
#define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
#define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
#define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
#define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
#define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
#define SIMDE_MM_EXCEPT_MASK \
  (SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | SIMDE_MM_EXCEPT_DIV_ZERO | \
   SIMDE_MM_EXCEPT_OVERFLOW | SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
/* MXCSR exception-mask bits (a set bit suppresses the exception). */
#if defined(_MM_MASK_INVALID)
#define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
#define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
#define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
#define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
#define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
#define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
#define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
#define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
#define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
#define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
#define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
#define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
#define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
#define SIMDE_MM_MASK_MASK \
  (SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | SIMDE_MM_MASK_DIV_ZERO | \
   SIMDE_MM_MASK_OVERFLOW | SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
/* MXCSR flush-to-zero control bit. */
#if defined(_MM_FLUSH_ZERO_MASK)
#define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
#define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
#define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
#define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
#define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
#define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_SSE_H) */
hermv_c_csc_n_hi.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * y := alpha * A * x + beta * y for a Hermitian matrix in CSC format with
 * only the upper triangle stored ("n" = non-unit diagonal, "hi" = upper).
 *
 * Each stored off-diagonal entry a(r,i) with r < i contributes
 *   alpha * a(r,i)       * x[i] to y[r]   (direct part), and
 *   alpha * op(a(r,i))   * x[r] to y[i]   (mirror part; alpha_mul_3c is
 *                                          presumably the conjugated
 *                                          multiply — TODO confirm).
 * The column loop is parallelized; each thread accumulates into a private
 * copy of y (y_local[tid]) to avoid racing on rows, and the private copies
 * are reduced into y at the end.
 *
 * Parameters: alpha/beta scalars, A the CSC matrix, x the input vector,
 * y the in/out result vector.  Returns ALPHA_SPARSE_STATUS_SUCCESS.
 */
static alphasparse_status_t hermv_csc_n_hi_unroll(const ALPHA_Number alpha,
                                                  const ALPHA_SPMAT_CSC *A,
                                                  const ALPHA_Number *x,
                                                  const ALPHA_Number beta,
                                                  ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT num_threads = alpha_get_thread_num();

    /* y := beta * y */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], y[i], beta);
    }

    /* Per-thread zero-initialized accumulators (columns scatter into
     * arbitrary rows, so a shared y would need atomics). */
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT ais = A->cols_start[i];
        const ALPHA_INT aie = A->cols_end[i];
        /* Index of the first entry in column i with row >= i; entries in
         * [ais, end) are strictly above the diagonal. */
        const ALPHA_INT end = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;

        /* Diagonal contribution, if column i stores a diagonal entry.
         * BUG FIX: guard end < aie before dereferencing.  The original code
         * read A->row_indx[end] unconditionally; when lower_bound returns
         * the past-the-end position (end == aie) that read belongs to the
         * next column — or is past the end of the array for the last
         * column — and could falsely add a diagonal term. */
        if (end < aie && A->row_indx[end] == i)
        {
            ALPHA_Number tmp;
            alpha_mul(tmp, alpha, A->values[end]);
            alpha_madde(y_local[tid][i], tmp, x[i]);
        }

        const ALPHA_INT *A_row = &A->row_indx[ais];
        const ALPHA_Number *A_val = &A->values[ais];
        const ALPHA_INT ail = end - ais; /* count of strictly-upper entries */
        ALPHA_Number alpha_xi, tmp;
        alpha_mul(alpha_xi, alpha, x[i]); /* hoisted alpha * x[i] */

        /* 4-way unrolled sweep over the strictly-upper entries. */
        ALPHA_INT ai = 0;
        for (; ai < ail - 3; ai += 4)
        {
            ALPHA_Number av0 = A_val[ai];
            ALPHA_Number av1 = A_val[ai + 1];
            ALPHA_Number av2 = A_val[ai + 2];
            ALPHA_Number av3 = A_val[ai + 3];
            ALPHA_INT ar0 = A_row[ai];
            ALPHA_INT ar1 = A_row[ai + 1];
            ALPHA_INT ar2 = A_row[ai + 2];
            ALPHA_INT ar3 = A_row[ai + 3];
            alpha_madde(y_local[tid][ar0], av0, alpha_xi);
            alpha_madde(y_local[tid][ar1], av1, alpha_xi);
            alpha_madde(y_local[tid][ar2], av2, alpha_xi);
            alpha_madde(y_local[tid][ar3], av3, alpha_xi);
            alpha_mul_3c(tmp, alpha, av0);
            alpha_madde(y_local[tid][i], tmp, x[ar0]);
            alpha_mul_3c(tmp, alpha, av1);
            alpha_madde(y_local[tid][i], tmp, x[ar1]);
            alpha_mul_3c(tmp, alpha, av2);
            alpha_madde(y_local[tid][i], tmp, x[ar2]);
            alpha_mul_3c(tmp, alpha, av3);
            alpha_madde(y_local[tid][i], tmp, x[ar3]);
        }
        /* Remainder (0-3 entries). */
        for (; ai < ail; ai++)
        {
            ALPHA_Number av = A_val[ai];
            ALPHA_INT ar = A_row[ai];
            alpha_madde(y_local[tid][ar], av, alpha_xi);
            alpha_mul_3c(tmp, alpha, av);
            alpha_madde(y_local[tid][i], tmp, x[ar]);
        }
    }

    /* Reduce the per-thread accumulators into y, then free them. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < m; col++)
        for (ALPHA_INT i = 0; i < num_threads; i++)
        {
            alpha_add(y[col], y[col], y_local[i][col]);
        }

    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (name expanded from ONAME); interface unchanged. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return hermv_csc_n_hi_unroll(alpha, A, x, beta, y);
}
Concat.h
#ifndef CONCAT
#define CONCAT

/*
 * Concat.h:
 * concatenation operation (joins the value vectors of several input nodes
 * into one output node).
 *
 * Created on: Apr 22, 2017
 *     Author: mszhang
 */

#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
#if USE_GPU
#include "n3ldg_cuda.h"
#endif
#include "profiler.h"

// Graph node whose value is the concatenation of its inputs' values.
// The output dimension (Node::dim, set by the caller before forward()) must
// equal the sum of the input dimensions; this is checked in forward().
class ConcatNode : public Node {
  public:
    vector<int> inDims;   // per-input dimensions, filled in forward()
    vector<PNode> ins;    // the input nodes, in concatenation order

    ConcatNode() : Node() {
        inDims.clear();
        ins.clear();
        node_type = "concat";
    }

#if USE_GPU
    // Export this node's buffers (and those of each input) for the CUDA
    // batch kernels.
    void toNodeInfo(NodeInfo &info) const override {
        Node::toNodeInfo(info);
        for (PNode p : ins) {
            info.input_vals.push_back(p->val.value);
            info.input_losses.push_back(p->loss.value);
            info.input_dims.push_back(p->dim);
        }
    }
#endif

    // Register inputs, record their dimensions, validate that they sum to
    // this node's dim, and add this node to the graph.  Aborts on empty
    // input or a dimension mismatch (construction-time programming errors).
    void forward(Graph *cg, const vector<PNode>& x) {
        if (x.size() == 0) {
            std::cout << "empty inputs for concat" << std::endl;
            abort();
        }
        ins.clear();
        for (int i = 0; i < x.size(); i++) {
            ins.push_back(x[i]);
        }

        degree = 0;
        int nSize = ins.size();
        for (int i = 0; i < nSize; ++i) {
            ins[i]->addParent(this);
        }

        inDims.clear();
        int curDim = 0;
        for (int i = 0; i < nSize; ++i) {
            inDims.push_back(ins[i]->val.dim);
            curDim += inDims[i];
        }
        if (curDim != dim) {
            std::cout << "input dim size not match" << curDim << "\t" << dim << std::endl;
            abort();
        }
        cg->addNode(this);
    }

    // Convenience overloads forwarding 1..6 explicit inputs.
    void forward(Graph *cg, PNode x1) {
        std::vector<PNode> ins = {x1};
        forward(cg, ins);
    }

    void forward(Graph *cg, PNode x1, PNode x2) {
        std::vector<PNode> ins = {x1, x2};
        forward(cg, ins);
    }

    void forward(Graph *cg, PNode x1, PNode x2, PNode x3) {
        std::vector<PNode> ins = {x1, x2, x3};
        forward(cg, ins);
    }

    void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) {
        std::vector<PNode> ins = {x1, x2, x3, x4};
        forward(cg, ins);
    }

    void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5) {
        std::vector<PNode> ins = {x1, x2, x3, x4, x5};
        forward(cg, ins);
    }

    void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5,
                 PNode x6) {
        std::vector<PNode> ins = {x1, x2, x3, x4, x5, x6};
        forward(cg, ins);
    }

    PExecute generate(bool bTrain, dtype cur_drop_factor);  // better to rewrite for deep understanding

    // Two concat nodes are batch-compatible only if their input dimension
    // lists match exactly (same count, same sizes, same order).
    bool typeEqual(PNode other) {
        if (!Node::typeEqual(other)) {
            return false;
        }
        ConcatNode *o = static_cast<ConcatNode*>(other);
        if (inDims.size() != o->inDims.size()) {
            return false;
        }
        for (int i = 0; i < inDims.size(); ++i) {
            if (inDims.at(i) != o->inDims.at(i)) {
                return false;
            }
        }
        return true;
    }

    // Hash consistent with typeEqual: folds each input dim in, rotated by
    // its position so permutations of the same dims hash differently.
    size_t typeHashCode() const override {
        size_t hash_code = Node::typeHashCode() ^ std::hash<int>{}(inDims.size());
        int i = 0;
        for (int dim : inDims) {
            hash_code ^= (dim << (i++ % 16));
        }
        return hash_code;
    }

    // val = [ins[0].val ; ins[1].val ; ...] (copy each input slice in order).
    void compute() {
        int nSize = ins.size();
        int offset = 0;
        for (int i = 0; i < nSize; ++i) {
            for (int idx = 0; idx < inDims.at(i); idx++) {
                val[offset + idx] = ins[i]->val[idx];
            }
            offset += inDims[i];
        }
    }

    // Scatter this node's loss back into the corresponding slice of each
    // input's loss (accumulating, since inputs may feed several parents).
    void backward() {
        int nSize = ins.size();
        int offset = 0;
        for (int i = 0; i < nSize; ++i) {
            for (int idx = 0; idx < inDims[i]; idx++) {
                ins[i]->loss[idx] += loss[offset + idx];
            }
            offset += inDims[i];
        }
    }

};

#if USE_GPU
// Batched GPU executor: runs ConcatForward/ConcatBackward over the whole
// batch at once.  Requires every node in the batch to be typeEqual (same
// inDims), since inDims of batch[0] is used for all of them.
class ConcatExecute : public Execute {
  public:
    int outDim;           // total concatenated dimension
    int inCount;          // number of inputs per node
    Tensor2D drop_mask;   // dropout mask; indexed [j][i] below — presumably
                          // [dim][instance], verify against Tensor2D layout

    void forward() {
        int count = batch.size();
        drop_mask.init(outDim, count);
        CalculateDropMask(count, outDim, drop_mask);
        // Flatten input/output value pointers for the batched kernel.
        std::vector<dtype*> in_vals, vals;
        in_vals.reserve(inCount * count);
        vals.reserve(count);
        for (Node *node : batch) {
            ConcatNode *concat = static_cast<ConcatNode*>(node);
            for (Node *in : concat->ins) {
                in_vals.push_back(in->val.value);
            }
            vals.push_back(node->val.value);
        }
        n3ldg_cuda::ConcatForward(in_vals,
                static_cast<ConcatNode*>(batch.at(0))->inDims, vals, bTrain,
                drop_mask.value, dynamicDropValue(), count, inCount, outDim);
#if TEST_CUDA
        // Cross-check the GPU result against the CPU implementation.
        if (initialDropValue() > 0) {
            drop_mask.copyFromDeviceToHost();
            for (int i = 0; i < count; ++i) {
                for (int j = 0; j < outDim; ++j) {
                    dtype v = drop_mask[j][i];
                    batch[i]->drop_mask[j] = v <= dynamicDropValue() ? 0 : 1;
                }
            }
        }
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            if (initialDropValue() > 0) {
                batch[idx]->forward_drop(bTrain, drop_factor);
            }
            n3ldg_cuda::Assert(batch[idx]->val.verify("concat forward"));
        }
#endif
    }

    void backward() {
        int count = batch.size();
        // Flatten input/output loss pointers for the batched kernel.
        std::vector<dtype*> in_losses, losses;
        in_losses.reserve(inCount * count);
        losses.reserve(count);
        for (Node *node : batch) {
            ConcatNode *concat = static_cast<ConcatNode*>(node);
            for (Node *in : concat->ins) {
                in_losses.push_back(in->loss.value);
            }
            losses.push_back(node->loss.value);
        }
        n3ldg_cuda::ConcatBackward(in_losses,
                static_cast<ConcatNode*>(batch.at(0))->inDims, losses,
                drop_mask.value, dynamicDropValue(), count, inCount, outDim);
#if TEST_CUDA
        for (int idx = 0; idx < count; idx++) {
            if (initialDropValue() > 0) {
                batch[idx]->backward_drop();
            }
            batch[idx]->backward();
        }
        for (int idx = 0; idx < count; idx++) {
            for (int j = 0; j < inCount; ++j) {
                n3ldg_cuda::Assert(static_cast<ConcatNode *>(batch[idx])->
                        ins[j]->loss.verify("concat backward"));
            }
        }
#endif
    }
};
#else
// CPU executor: per-node compute/backward plus dropout handling.
class ConcatExecute : public Execute {
  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            batch[idx]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
    }
};
#endif

// Create the executor for this node; on GPU builds, also record the per-node
// input count and total output dimension needed by the batched kernels.
inline PExecute ConcatNode::generate(bool bTrain, dtype cur_drop_factor) {
    ConcatExecute* exec = new ConcatExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
#if USE_GPU
    exec->inCount = this->ins.size();
    exec->outDim = 0;
    for (int d : inDims) {
        exec->outDim += d;
    }
#endif
    return exec;
}

#endif
demos.h
//------------------------------------------------------------------------------
// GraphBLAS/Demo/Include/demos.h: include file for all demo programs
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

#ifndef GRAPHBLAS_DEMOS_H
#define GRAPHBLAS_DEMOS_H

#include "GraphBLAS.h"
#include "simple_rand.h"
#include "simple_timer.h"
#include "usercomplex.h"

// When compiled as a MATLAB mex function, route all allocation through the
// MATLAB memory manager.
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#include "matrix.h"
#define malloc mxMalloc
#define free mxFree
#define calloc mxCalloc
#define realloc mxRealloc
#endif

//------------------------------------------------------------------------------
// manage compiler warnings
//------------------------------------------------------------------------------

#if defined __INTEL_COMPILER
#pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 )
#elif defined __GNUC__
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wformat-truncation="
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-result"
#pragma GCC diagnostic ignored "-Wint-in-bool-context"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wsign-compare"
#pragma GCC diagnostic ignored "-Wtype-limits"
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
// enable these warnings as errors
#pragma GCC diagnostic error "-Wmisleading-indentation"
#pragma GCC diagnostic error "-Wswitch-default"
#endif

#undef MIN
#undef MAX
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX(a,b) (((a) > (b)) ? (a) : (b))

GrB_Info bfs5m              // BFS of a graph (using vector assign & reduce)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    GrB_Matrix A,           // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info bfs5m_check        // BFS of a graph (using vector assign & reduce)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    GrB_Matrix A,           // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info bfs6               // BFS of a graph (using apply)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    const GrB_Matrix A,     // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info bfs6_check         // BFS of a graph (using apply)
(
    GrB_Vector *v_output,   // v [i] is the BFS level of node i in the graph
    const GrB_Matrix A,     // input graph, treated as if boolean in semiring
    GrB_Index s             // starting node of the BFS
) ;

GrB_Info read_matrix        // read a double-precision matrix
(
    GrB_Matrix *A,          // handle of matrix to create
    FILE *f,                // file to read the tuples from
    bool make_symmetric,    // if true, return A as symmetric
    bool no_self_edges,     // if true, then remove self edges from A
    bool one_based,         // if true, input matrix is 1-based
    bool boolean,           // if true, input is GrB_BOOL, otherwise GrB_FP64
    bool printstuff         // if true, print status to stdout
) ;

GrB_Info mis                // compute a maximal independent set
(
    GrB_Vector *iset_output,    // iset(i) = true if i is in the set
    const GrB_Matrix A          // symmetric Boolean matrix
) ;

GrB_Info mis_check          // compute a maximal independent set
(
    GrB_Vector *iset_output,    // iset(i) = true if i is in the set
    const GrB_Matrix A          // symmetric Boolean matrix
) ;

void mis_score (double *result, uint32_t *degree) ;

// current BFS level, shared with the bfs_level unary operator; one private
// copy per OpenMP thread
extern int32_t level ;
#pragma omp threadprivate(level)

void bfs_level (int32_t *result, bool *element) ;

GrB_Info random_matrix      // create a random double-precision matrix
(
    GrB_Matrix *A_output,   // handle of matrix to create
    bool make_symmetric,    // if true, return A as symmetric
    bool no_self_edges,     // if true, then do not create self edges
    int64_t nrows,          // number of rows
    int64_t ncols,          // number of columns
    int64_t ntuples,        // number of entries (x2 if made symmetric)
    int method,             // method to use: 0:setElement, 1:build
    bool A_complex          // if true, create a Complex matrix
) ;

GrB_Info get_matrix         // get a matrix from stdin, or create random one
(
    GrB_Matrix *A_output,   // matrix to create
    int argc,               // command-line arguments
    char **argv,
    bool no_self_edges,     // if true, ensure the matrix has no self-edges
    bool boolean            // if true, file is read as GrB_BOOL, else GrB_FP64
) ;

GrB_Info wathen             // construct a random Wathen matrix
(
    GrB_Matrix *A_output,   // output matrix
    int64_t nx,             // grid dimension nx
    int64_t ny,             // grid dimension ny
    bool scale,             // if true, scale the rows
    int method,             // 0 to 3
    double *rho_given       // nx-by-ny dense matrix, if NULL use random rho
) ;

GrB_Info triu               // C = triu (A,1)
(
    GrB_Matrix *C_output,   // output matrix
    const GrB_Matrix A      // input matrix, boolean or double
) ;

GrB_Info tricount           // count # of triangles
(
    int64_t *ntri,          // # of triangles in the graph
    const int method,       // 0 to 5, see above (original said "0 to 4";
                            // methods 5 appears below — verify)
    const GrB_Matrix A,     // adjacency matrix for methods 0, 1, and 2
    const GrB_Matrix E,     // edge incidence matrix for method 0
    const GrB_Matrix L,     // L=tril(A) for methods 2, 4, and 5 (original
                            // comment said "2, 4, and 4", a likely typo)
    const GrB_Matrix U,     // U=triu(A) for methods 2, 3, and 5
    double t [2]            // t [0]: multiply time, t [1]: reduce time
) ;

GrB_Info isequal_type       // return GrB_SUCCESS if successful
(
    bool *result,           // true if A == B, false if A != B or error
    GrB_Matrix A,
    GrB_Matrix B,
    GrB_BinaryOp op         // should be GrB_EQ_<type>, for the type of A and B
) ;

GrB_Info isequal            // return GrB_SUCCESS if successful
(
    bool *result,           // true if A == B, false if A != B or error
    GrB_Matrix A,
    GrB_Matrix B,
    GrB_BinaryOp userop     // for A and B with user-defined types.  ignored
                            // if A and B are of built-in types
) ;

//------------------------------------------------------------------------------
// page rank
//------------------------------------------------------------------------------

// dpagerank computes an array of structs for its result
typedef struct
{
    double pagerank ;   // the pagerank of a node
    GrB_Index page ;    // the node number itself
}
PageRank ;

// ipagerank computes an array of structs for its result
typedef struct
{
    uint64_t pagerank ; // the pagerank of a node
    GrB_Index page ;    // the node number itself
}
iPageRank ;

// using a standard semiring and FP64 arithmetic
GrB_Info dpagerank          // GrB_SUCCESS or error condition
(
    PageRank **Phandle,     // output: pointer to array of PageRank structs
    GrB_Matrix A
) ;

// like dpagerank but with user-defined type, operators, and semiring;
// also a stopping criterion
GrB_Info dpagerank2         // GrB_SUCCESS or error condition
(
    PageRank **Phandle,     // output: pointer to array of PageRank structs
    GrB_Matrix A,           // input graph, not modified
    int itermax,            // max number of iterations
    double tol,             // stop when norm (r-rnew,2) < tol
    int *iters,             // number of iterations taken
    GrB_Desc_Value method   // method to use for GrB_vxm (for testing only)
) ;

GrB_Info drowscale          // GrB_SUCCESS or error condition
(
    GrB_Matrix *Chandle,    // output matrix C = rowscale (A)
    GrB_Matrix A            // input matrix, not modified
) ;

GrB_Info ipagerank          // GrB_SUCCESS or error condition
(
    iPageRank **Phandle,    // output: pointer to array of iPageRank structs
    GrB_Matrix A            // input graph, not modified
) ;

GrB_Info irowscale          // GrB_SUCCESS or error condition
(
    GrB_Matrix *Chandle,    // output matrix C = rowscale (A)
    GrB_Matrix A            // input matrix, not modified
) ;

// multiplicative scaling factor for ipagerank, ZSCALE = 2^30
#define ZSCALE ((uint64_t) 1073741824)

//------------------------------------------------------------------------------
// import/export test
//------------------------------------------------------------------------------

GrB_Info import_test (GrB_Matrix *C_handle, int format, bool dump) ;

//------------------------------------------------------------------------------
// CHECK: expr must be true; if not, return an error condition
//------------------------------------------------------------------------------

// the #include'ing file must define the FREE_ALL macro

#define CHECK(expr,info)                                                \
{                                                                       \
    if (! (expr))                                                       \
    {                                                                   \
        /* free the result and all workspace, and return NULL */        \
        FREE_ALL ;                                                      \
        printf ("Failure: line %d file %s\n", __LINE__, __FILE__) ;     \
        return (info) ;                                                 \
    }                                                                   \
}

//------------------------------------------------------------------------------
// OK: call a GraphBLAS method and check the result
//------------------------------------------------------------------------------

// OK(method) is a macro that calls a GraphBLAS method and checks the status;
// if a failure occurs, it handles the error via the CHECK macro above, and
// returns the error status to the caller.

#define OK(method)                                                      \
{                                                                       \
    info = method ;                                                     \
    if (info != GrB_SUCCESS)                                            \
    {                                                                   \
        printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ;              \
        CHECK (false, info) ;                                           \
    }                                                                   \
}

#endif
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // Computes res += alpha * lhs * rhs for a row-major res by forwarding the
  // transposed problem (lhs/rhs swapped, storage orders flipped) to the
  // col-major kernel below, so only one blocked implementation is needed.
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar *lhs, Index lhsStride,
    const RhsScalar *rhs, Index rhsStride,
    ResScalar *res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar, LhsScalar> &blocking,
    GemmParallelInfo <Index> *info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking, info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // Blocked GEMM: res += alpha * lhs * rhs, col-major res.
  // The K dimension is tiled by kc and the M dimension by mc; each (mc x kc)
  // block of lhs is packed to blockA and each (kc x cols) panel of rhs to
  // blockB before being fed to the gebp micro-kernel.
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar *_lhs, Index lhsStride,
    const RhsScalar *_rhs, Index rhsStride,
    ResScalar *res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar, RhsScalar> &blocking,
    GemmParallelInfo <Index> *info = 0)
  {
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs, lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs, rhsStride);

    typedef gebp_traits<LhsScalar, RhsScalar> Traits;

    Index kc = blocking.kc();                    // cache block size along the K direction
    Index mc = (std::min)(rows, blocking.mc());  // cache block size along the M direction
    //Index nc = blocking.nc(); // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      // Each thread owns one horizontal slice of the result; blockB is a
      // shared buffer into which every thread packs its own rhs sub-panel.
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
      ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing A'.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.

        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        // NOTE(review): this spin-wait (and the one below on info[j].sync) assumes
        // GemmParallelInfo's fields are visible across threads without an explicit
        // barrier (volatile/atomic in the Parallelizer) — confirm before touching.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per B'_j
        for(Index shift=0; shift<threads; ++shift)
        {
          Index j = (tid+shift)%threads;

          // At this point we have to make sure that B'_j has been updated by the thread j,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if(shift>0)
            while(info[j].sync!=k) {}

          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }

        // Then keep going as usual with the remaining A'
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = (std::min)(i+mc,rows)-i;

          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }

        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index j=0; j<threads; ++j)
          #pragma omp atomic
          info[j].users -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc * mc;
      std::size_t sizeB = kc * cols;
      std::size_t sizeW = kc * Traits::WorkSpaceFactor;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      // (==GEMM_VAR1)
      for (Index k2 = 0; k2 < depth; k2 += kc)
      {
        const Index actual_kc = (std::min)(k2 + kc, depth) - k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2, 0), rhsStride, actual_kc, cols);

        // For each mc x kc block of the lhs's vertical panel...
        // (==GEPP_VAR1)
        for (Index i2 = 0; i2 < rows; i2 += mc)
        {
          const Index actual_mc = (std::min)(i2 + mc, rows) - i2;

          // We pack the lhs's block into a sequential chunk of memory (L1 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);

          // Everything is packed, we can now call the block * panel kernel:
          gebp(res + i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }
    }
  }
};

/*********************************************************************************
 *  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
 *  implementation of the high level wrapper to general_matrix_matrix_product
 **********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct < Lhs, Rhs, GemmProduct> >
  : traits<ProductBase < GeneralProduct < Lhs, Rhs, GemmProduct>, Lhs, Rhs> >
{};

// Functor handed to the parallelizer: each call computes one horizontal slice
// [row, row+rows) x [col, col+cols) of the destination via Gemm::run.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs &lhs, const Rhs &rhs, Dest &dest, const Scalar &actualAlpha, BlockingType &blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Called once before the parallel region so the shared rhs buffer exists.
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  void operator()(Index row, Index rows, Index col = 0, Index cols = -1, GemmParallelInfo <Index> *info = 0) const
  {
    if (cols == -1)  // means "all the remaining columns"
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row, 0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0, col), m_rhs.outerStride(),
              (Scalar *) &(m_dest.coeffRef(row, col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs &m_lhs;
    const Rhs &m_rhs;
    Dest &m_dest;
    Scalar m_actualAlpha;
    BlockingType &m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1,
         bool FiniteAtCompileTime = MaxRows != Dynamic && MaxCols != Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Holds the (possibly shared) packing buffers and the mc/nc/kc block sizes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar *m_blockA;
    RhsScalar *m_blockB;
    RhsScalar *m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar *blockA() { return m_blockA; }
    inline RhsScalar *blockB() { return m_blockB; }
    inline RhsScalar *blockW() { return m_blockW; }
};

// Fixed-size case: all dimensions known at compile time, buffers are static.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
      typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder == RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
    typedef gebp_traits <LhsScalar, RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    // nothing to allocate: the buffers are embedded in the object
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};

// Dynamic case: block sizes are computed at runtime and buffers heap-allocated lazily.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
      typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder == RowMajor
    };
    typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
    typedef gebp_traits <LhsScalar, RhsScalar> Traits;

    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc * Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if (this->m_blockA == 0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if (this->m_blockB == 0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if (this->m_blockW == 0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};

} // end namespace internal

template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs, Rhs, GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime, Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    typedef typename Lhs::Scalar LhsScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Scalar ResScalar;

    GeneralProduct(const Lhs &lhs, const Rhs &rhs) : Base(lhs, rhs)
    {
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
      typedef internal::scalar_product_op <LhsScalar, RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp, LhsScalar, RhsScalar);
#endif
    }

    // dst += alpha * lhs * rhs, dispatching to the blocked (and possibly
    // OpenMP-parallel) kernel above.
    template<typename Dest>
    void scaleAndAddTo(Dest &dst, const Scalar &alpha) const
    {
      eigen_assert(dst.rows() == m_lhs.rows() && dst.cols() == m_rhs.cols());
      if (m_lhs.cols() == 0 || m_lhs.rows() == 0 || m_rhs.cols() == 0)
        return;

      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

      // fold any nested scalar multiples (e.g. (s1*A)*(s2*B)) into one alpha
      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef internal::gemm_blocking_space<(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor, LhsScalar, RhsScalar,
              Dest::MaxRowsAtCompileTime, Dest::MaxColsAtCompileTime, MaxDepthAtCompileTime> BlockingType;

      typedef internal::gemm_functor <
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags & RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime > 32 || Dest::MaxRowsAtCompileTime == Dynamic)>(
        GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags & RowMajorBit);
    }
};

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
linalg.h
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal * * This file is part of SPAMS. * * SPAMS is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * SPAMS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with SPAMS. If not, see <http://www.gnu.org/licenses/>. */ /* \file * toolbox Linalg * * by Julien Mairal * julien.mairal@inria.fr * * File linalg.h * \brief Contains Matrix, Vector classes */ #ifndef LINALG_H #define LINALG_H #include "misc.h" #include "cblas_alt_template.h" #include <fstream> #ifdef WINDOWS #include <string> #else #include <cstring> #endif #include "utils.h" #undef max #undef min /// Dense Matrix class template<typename T> class Matrix; /// Sparse Matrix class template<typename T, typename I = INTM> class SpMatrix; /// Dense Vector class template<typename T> class Vector; /// Sparse Vector class template<typename T, typename I = INTM> class SpVector; template <typename T> static inline bool isZero(const T lambda) { return static_cast<double>(abs<T>(lambda)) < 1e-99; } template <typename T> static inline bool isEqual(const T lambda1, const T lambda2) { return static_cast<double>(abs<T>(lambda1-lambda2)) < 1e-99; } template <typename T> static inline T softThrs(const T x, const T lambda) { if (x > lambda) { return x-lambda; } else if (x < -lambda) { return x+lambda; } else { return 0; } }; template <typename T> static inline T fastSoftThrs(const T x, const T lambda) { return x + T(0.5)*(abs<T>(x-lambda) - abs<T>(x+lambda)); }; template <typename T> static inline T hardThrs(const T x, const T lambda) { return (x > 
lambda || x < -lambda) ? x : 0; }; template <typename T> static inline T xlogx(const T x) { if (x < -1e-20) { return INFINITY; } else if (x < 1e-20) { return 0; } else { return x*alt_log<T>(x); } } template <typename T> static inline T logexp(const T x) { if (x < -30) { return 0; } else if (x < 30) { return alt_log<T>( T(1.0) + exp_alt<T>( x ) ); } else { return x; } } template <typename T> static inline T logexp2(const T x) { return (x > 0) ? x + log_alt<T>(T(1.0)+ exp_alt<T>(-x)) : log( T(1.0) + exp_alt<T>( x ) ); } template <typename T> static T solve_binomial(const T a, const T b, const T c) { const T delta = b*b-4*a*c; return (-b + alt_sqrt<T>(delta))/(2*a); // returns single largest solution, assiming delta > 0; }; template <typename T> static T solve_binomial2(const T a, const T b, const T c) { const T delta = b*b-4*a*c; return (-b - alt_sqrt<T>(delta))/(2*a); // returns single largest solution, assiming delta > 0; }; /// Class Matrix template<typename T> class Matrix { friend class SpMatrix<T>; public: typedef T value_type; typedef Vector<T> col_type; typedef INTM index_type; typedef Vector<T> element; /// Constructor with existing data X of an m x n matrix Matrix(T* X, INTM m, INTM n); /// Constructor for a new m x n matrix Matrix(INTM m, INTM n); /// Empty constructor Matrix(); /// Destructor virtual ~Matrix(); /// Accessors /// Number of rows inline INTM m() const { return _m; }; /// Number of columns inline INTM n() const { return _n; }; /// size inline INTM size() const { return _n*_m; }; /// Return a modifiable reference to X(i,j) inline T& operator()(const INTM i, const INTM j); /// Return the value X(i,j) inline T operator()(const INTM i, const INTM j) const; /// Return a modifiable reference to X(i) (1D indexing) inline T& operator[](const INTM index) { return _X[index]; }; /// Return the value X(i) (1D indexing) inline T operator[](const INTM index) const { return _X[index]; }; /// Copy the column i into x inline void copyCol(const INTM i, 
Vector<T>& x) const; /// Copy the column i into x inline void copyRow(const INTM i, Vector<T>& x) const; inline void scalRow(const INTM i, const T s) const; inline void copyToRow(const INTM i, const Vector<T>& x); /// Copy the column i into x inline void extract_rawCol(const INTM i, T* x) const; /// Copy the column i into x virtual void add_rawCol(const INTM i, T* DtXi, const T a) const; /// Copy the column i into x inline void getData(Vector<T>& data, const INTM i) const; /// Reference the column i into the vector x inline void refCol(INTM i, Vector<T>& x) const; /// Reference the column i to i+n into the Matrix mat inline void refSubMat(INTM i, INTM n, Matrix<T>& mat) const; /// extract a sub-matrix of a symmetric matrix inline void subMatrixSym(const Vector<INTM>& indices, Matrix<T>& subMatrix) const; /// reference a modifiable reference to the data, DANGEROUS inline T* rawX() const { return _X; }; /// return a non-modifiable reference to the data inline const T* X() const { return _X; }; /// make a copy of the matrix mat in the current matrix inline void copy(const Matrix<T>& mat); /// make a copy of the matrix mat in the current matrix inline void copyTo(Matrix<T>& mat) const { mat.copy(*this); }; /// make a copy of the matrix mat in the current matrix inline void copyRef(const Matrix<T>& mat); /// Debugging function /// Print the matrix to std::cout inline void print(const string& name) const; inline void dump(const string& name) const; /// Modifiers /// clean a dictionary matrix inline void clean(); /// Resize the matrix inline void resize(INTM m, INTM n, const bool set_zeros = true); /// Change the data in the matrix inline void setData(T* X, INTM m, INTM n); /// Change the data in the matrix inline void refData(const Matrix<T>& mat) { this->setData(mat.rawX(),mat.m(),mat.n()); }; /// modify _m inline void setm(const INTM m) { _m = m; }; //DANGEROUS /// modify _n inline void setn(const INTM n) { _n = n; }; //DANGEROUS /// Set all the values to zero inline 
void setZeros(); /// Set all the values to a scalar inline void set(const T a); /// Clear the matrix inline void clear(); /// Put white Gaussian noise in the matrix inline void setAleat(); /// set the matrix to the identity; inline void eye(); /// Normalize all columns to unit l2 norm inline void normalize(); /// Normalize all columns which l2 norm is greater than one. inline void normalize2(); /// center the columns of the matrix inline void center(); /// center the columns of the matrix inline void center_rows(); /// center the columns of the matrix inline void normalize_rows(); /// center the columns of the matrix and keep the center values inline void center(Vector<T>& centers); /// scale the matrix by the a inline void scal(const T a); /// make the matrix symmetric by copying the upper-right part /// into the lower-left part inline void fillSymmetric(); inline void fillSymmetric2(); /// change artificially the size of the matrix, DANGEROUS inline void fakeSize(const INTM m, const INTM n) { _n = n; _m=m;}; /// whiten inline void whiten(const INTM V); /// whiten inline void whiten(Vector<T>& mean, const bool pattern = false); /// whiten inline void whiten(Vector<T>& mean, const Vector<T>& mask); /// whiten inline void unwhiten(Vector<T>& mean, const bool pattern = false); /// whiten inline void sum_cols(Vector<T>& sum) const; /// Analysis functions /// Check wether the columns of the matrix are normalized or not inline bool isNormalized() const; /// return the 1D-index of the value of greatest magnitude inline INTM fmax() const; /// return the 1D-index of the value of greatest magnitude inline T fmaxval() const; /// return the 1D-index of the value of lowest magnitude inline INTM fmin() const; // Algebric operations /// Transpose the current matrix and put the result in the matrix /// trans inline void transpose(Matrix<T>& trans) const; /// A <- -A inline void neg(); /// add one to the diagonal inline void incrDiag(); inline void addDiag(const Vector<T>& diag); 
inline void addDiag(const T diag); inline void addToCols(const Vector<T>& diag); inline void addVecToCols(const Vector<T>& diag, const T a = 1.0); /// perform a rank one approximation uv' using the power method /// u0 is an initial guess for u (can be empty). inline void svdRankOne(const Vector<T>& u0, Vector<T>& u, Vector<T>& v) const; inline void singularValues(Vector<T>& u) const; inline void svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const; inline void svd2(Matrix<T>& U, Vector<T>& S, const int num = -1, const int method = 0) const; inline void SymEig(Matrix<T>& U, Vector<T>& S) const; inline void InvsqrtMat(Matrix<T>& out, const T lambda = 0) const; inline void sqrtMat(Matrix<T>& out) const; // inline void Inv(Matrix<T>& out) const; /// find the eigenvector corresponding to the largest eigenvalue /// when the current matrix is symmetric. u0 is the initial guess. /// using two iterations of the power method inline void eigLargestSymApprox(const Vector<T>& u0, Vector<T>& u) const; /// find the eigenvector corresponding to the eivenvalue with the /// largest magnitude when the current matrix is symmetric, /// using the power method. It /// returns the eigenvalue. u0 is an initial guess for the /// eigenvector. inline T eigLargestMagnSym(const Vector<T>& u0, Vector<T>& u) const; /// returns the value of the eigenvalue with the largest magnitude /// using the power iteration. 
inline T eigLargestMagnSym() const; /// inverse the matrix when it is symmetric inline void invSym(); inline void invSymPos(); /// perform b = alpha*A'x + beta*b inline void multTrans(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const; /// perform b = alpha*A'x + beta*b inline void multTrans(const Vector<T>& x, Vector<T>& b, const Vector<bool>& active) const; /// perform b = A'x, when x is sparse template <typename I> inline void multTrans(const SpVector<T,I>& x, Vector<T>& b, const T alpha =1.0, const T beta = 0.0) const; /// perform b = alpha*A*x+beta*b inline void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const; inline void mult_loop(const Vector<T>& x, Vector<T>& b) const; /// perform b = alpha*A*x + beta*b, when x is sparse template <typename I> inline void mult(const SpVector<T,I>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const; template <typename I> inline void mult_loop(const SpVector<T,I>& x, Vector<T>& b) const { this->mult(x,b); } /// perform C = a*A*B + b*C, possibly transposing A or B. inline void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// perform C = a*B*A + b*C, possibly transposing A or B. 
   inline void multSwitch(const Matrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// perform C = A*B, when B is sparse
   template <typename I>
      inline void mult(const SpMatrix<T,I>& B, Matrix<T>& C, const bool transA = false,
            const bool transB = false, const T a = 1.0,
            const T b = 0.0) const;
   /// mult by a diagonal matrix on the left
   inline void multDiagLeft(const Vector<T>& diag);
   /// mult by a diagonal matrix on the right
   inline void multDiagRight(const Vector<T>& diag);
   /// accumulate a right-diagonal product into mat (A*Diag(diag) added to mat — verify against implementation)
   inline void AddMultDiagRight(const Vector<T>& diag, Matrix<T>& mat);
   /// C = A .* B, elementwise multiplication
   inline void mult_elementWise(const Matrix<T>& B, Matrix<T>& C) const;
   /// C = A ./ B, elementwise division
   inline void div_elementWise(const Matrix<T>& B, Matrix<T>& C) const;
   /// XtX = A'*A
   inline void XtX(Matrix<T>& XtX) const;
   /// XXt = A*A'
   inline void XXt(Matrix<T>& XXt) const;
   /// XXt = A*A' where A is an upper triangular matrix
   inline void upperTriXXt(Matrix<T>& XXt,
         const INTM L) const;
   /// extract the diagonal
   inline void diag(Vector<T>& d) const;
   /// set the diagonal from a vector
   inline void setDiag(const Vector<T>& d);
   /// set every diagonal entry to val
   inline void setDiag(const T val);
   /// each element of the matrix is replaced by its exponential
   inline void exp();
   /// each element of the matrix is raised to the power a
   inline void pow(const T a);
   /// elementwise square root (see Vector<T> for the Sqrt/Invsqrt/sqr conventions)
   inline void Sqrt();
   /// elementwise inverse square root
   inline void Invsqrt();
   /// elementwise square
   inline void sqr();
   /// return vec1'*A*vec2, where vec2 is sparse
   template <typename I>
      inline T quad(const Vector<T>& vec1, const SpVector<T,I>& vec2) const;
   /// y = a*A*vec2 + b*y style product restricted to vec2's support (see implementation)
   template <typename I>
      inline void quad_mult(const Vector<T>& vec1, const SpVector<T,I>& vec2,
            Vector<T>& y, const T a = 1.0, const T b = 0.0) const;
   /// return vec'*A*vec when vec is sparse
   template <typename I>
      inline T quad(const SpVector<T,I>& vec) const;
   /// add alpha*mat to the current matrix
   inline void add(const Matrix<T>& mat, const T alpha = 1.0);
   /// A <- beta*A + alpha*mat
   inline void add_scal(const Matrix<T>& mat, const T alpha = 1.0, const T beta = 1.0);
   /// add the scalar alpha to every entry
   inline void add(const T alpha);
   /// inner product with mat (Frobenius dot product — verify against implementation)
   inline T dot(const Matrix<T>& mat) const;
   /// substract the matrix mat to the current matrix
   inline void sub(const Matrix<T>& mat);
   /// inverse the elements of the matrix
   inline void inv_elem();
   /// alias for inv_elem (elementwise inverse, NOT matrix inversion)
   inline void inv() { this->inv_elem(); };
   /// return the trace of the matrix
   inline T trace() const;
   /// compute the sum of the magnitude of the matrix values
   inline T asum() const;
   /// compute the sum of the matrix values
   inline T sum() const;
   /// return ||A||_F
   inline T normF() const;
   /// mean of the entries (name-based; body not visible here)
   inline T mean() const;
   /// mean of the entry magnitudes (name-based; body not visible here)
   inline T abs_mean() const;
   /// return ||A||_F^2
   inline T normFsq() const;
   /// alias for normFsq
   inline T nrm2sq() const { return this->normFsq(); };
   /// return ||At||_{inf,2} (max of l2 norm of the columns)
   inline T norm_inf_2_col() const;
   /// return ||At||_{1,2} (sum of l2 norm of the columns)
   inline T norm_1_2_col() const;
   /// returns the l2 norms of the columns
   inline void norm_2_cols(Vector<T>& norms) const;
   /// returns the l2 norms of the rows
   inline void norm_2_rows(Vector<T>& norms) const;
   /// returns the linf norms of the columns
   inline void norm_inf_cols(Vector<T>& norms) const;
   /// returns the linf norms of the rows
   inline void norm_inf_rows(Vector<T>& norms) const;
   /// returns the l1 norms of the rows
   inline void norm_l1_rows(Vector<T>& norms) const;
   /// returns the sums of the columns
   inline void get_sum_cols(Vector<T>& sum) const;
   /// columnwise dot products with mat
   inline void dot_col(const Matrix<T>& mat, Vector<T>& dots) const;
   /// returns the l2 norms ^2 of the columns
   inline void norm_2sq_cols(Vector<T>& norms) const;
   /// returns the l2 norms ^2 of the rows
   inline void norm_2sq_rows(Vector<T>& norms) const;
   /// thresholding operators (clip / shrink entries against nu)
   inline void thrsmax(const T nu);
   inline void thrsmin(const T nu);
   inline void thrsabsmin(const T nu);
   /// perform soft-thresholding of the matrix, with the threshold nu
   inline void softThrshold(const T nu);
   inline void fastSoftThrshold(const T nu);
   inline void fastSoftThrshold(Matrix<T>& output, const T nu) const;
   inline void hardThrshold(const T nu);
   /// keep only the positive part of each entry
   inline void thrsPos();
   /// perform A <- A + alpha*vec1*vec2'
   inline void rank1Update(const Vector<T>& vec1, const Vector<T>& vec2,
         const T alpha = 1.0);
   /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse
   template <typename I>
      inline void rank1Update(const SpVector<T,I>& vec1, const Vector<T>& vec2,
            const T alpha = 1.0);
   /// perform A <- A + alpha*vec1*vec2', when vec2 is sparse
   template <typename I>
      inline void rank1Update(const Vector<T>& vec1, const SpVector<T,I>& vec2,
            const T alpha = 1.0);
   template <typename I>
      inline void rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b,
            const SpVector<T,I>& vec2,
            const T alpha = 1.0);
   /// perform A <- A + alpha*vec*vec', when vec is sparse
   template <typename I>
      inline void rank1Update(const SpVector<T,I>& vec,
            const T alpha = 1.0);
   /// perform A <- A + alpha*vec*vec2', when both are sparse
   template <typename I>
      inline void rank1Update(const SpVector<T,I>& vec, const SpVector<T,I>& vec2,
            const T alpha = 1.0);
   /// Compute the mean of the columns
   inline void meanCol(Vector<T>& mean) const;
   /// Compute the mean of the rows
   inline void meanRow(Vector<T>& mean) const;
   /// fill the matrix with the row given
   inline void fillRow(const Vector<T>& row);
   /// extract row i into row
   inline void extractRow(const INTM i, Vector<T>& row) const;
   inline void setRow(const INTM i, const Vector<T>& row);
   inline void addRow(const INTM i, const Vector<T>& row, const T a=1.0);
   /// compute x, such that b = Ax, WARNING this function needs to be u
   /// updated
   inline void conjugateGradient(const Vector<T>& b, Vector<T>& x,
         const T tol = 1e-4, const int = 4) const;
   /// write the matrix to fileName (see implementation for the format)
   inline void drop(char* fileName) const;
   /// compute a Nadaraya Watson estimator
   inline void NadarayaWatson(const Vector<INTM>& ind, const T sigma);
   /// performs soft-thresholding of the vector
   inline void blockThrshold(const T nu, const INTM sizeGroup);
   /// performs sparse projections of the columns
   inline void sparseProject(Matrix<T>& out, const T thrs, const int mode = 1, const T lambda1 = 0,
         const T lambda2 = 0, const T lambda3 = 0, const bool pos = false, const int numThreads=-1);
   inline void transformFilter();

   /// Conversion
   /// make a sparse copy of the current matrix
   inline void toSparse(SpMatrix<T>& matrix) const;
   /// make a sparse copy of the transpose of the current matrix
   inline void toSparseTrans(SpMatrix<T>& matrixTrans);
   /// make a reference of the matrix to a vector vec
   inline void toVect(Vector<T>& vec) const;
   /// Accessor
   inline INTM V() const { return 1;};
   /// extract the rows of a matrix corresponding to a binary mask
   inline void copyMask(Matrix<T>& out, Vector<bool>& mask) const;
   typedef Vector<T> col;
   static const bool is_sparse = false;

   protected:
   /// Forbid lazy copies
   explicit Matrix<T>(const Matrix<T>& matrix);
   /// Forbid lazy copies
   Matrix<T>& operator=(const Matrix<T>& matrix);

   /// is the data allocation external or not
   bool _externAlloc;
   /// pointer to the data
   T* _X;
   /// number of rows
   INTM _m;
   /// number of columns
   INTM _n;

};

/// Class for dense vector
template<typename T> class Vector {
   friend class SpMatrix<T>;
   friend class Matrix<T>;
   friend class SpVector<T>;
   public:
   typedef T value_type;
   typedef T element;
   /// Empty constructor
   Vector();
   /// Constructor. Create a new vector of size n
   Vector(INTM n);
   /// Constructor with existing data
   Vector(T* X, INTM n);
   /// Copy constructor
   explicit Vector<T>(const Vector<T>& vec);
   /// Destructor
   virtual ~Vector();

   /// Accessors
   /// Print the vector to std::cout
   inline void print(const char* name) const;
   /// dump the vector to a file
   inline void dump(const string& name) const;
   /// returns the index of the largest value
   inline INTM max() const;
   /// returns the index of the minimum value
   inline INTM min() const;
   /// returns the maximum value
   inline T maxval() const;
   /// returns the minimum value
   inline T minval() const;
   /// returns the index of the value with largest magnitude
   inline INTM fmax() const;
   /// returns the index of the value with smallest magnitude
   inline INTM fmin() const;
   /// returns the maximum magnitude
   inline T fmaxval() const;
   /// returns the minimum magnitude
   inline T fminval() const;
   /// returns a reference to X[index]
   inline T& operator[](const INTM index);
   /// returns X[index]
   inline T operator[](const INTM index) const;
   /// make a copy of x
   inline void copy(const Vector<T>& x);
   /// reference x's data (no copy)
   inline void copyRef(const Vector<T>& x);
   /// returns the size of the vector
   inline int n() const { return _n; };
   /// returns the size of the vector
   inline int size() const { return _n; };
   /// returns a modifiable reference of the data, DANGEROUS
   inline T* rawX() const { return _X; };
   /// change artificially the size of the vector, DANGEROUS
   inline void fakeSize(const INTM n) { _n = n; };
   /// generate logarithmically spaced values
   inline void logspace(const INTM n, const T a, const T b);
   /// number of nonzero entries
   inline INTM nnz() const;

   /// Modifiers
   /// Set all values to zero
   inline void setZeros();
   /// resize the vector
   inline void resize(const INTM n, const bool set_zeros = true);
   /// change the data of the vector
   inline void setPointer(T* X, const INTM n);
   inline void setData(T* X, const INTM n) { this->setPointer(X,n); };
   inline void refData(const Vector<T>& vec) { this->setPointer(vec.rawX(),vec.n()); };
   /// reference the sub-vector [i, i+n) into mat
   inline void refSubVec(INTM i, INTM n, Vector<T>& mat) const { mat.setData(_X+i,n); };
   //inline void print(const char* name) const;
   inline void print(const string& name) const;
   /// put a random permutation of size n (for integral vectors)
   inline void randperm(int n);
   /// put random integers in [0,n) (for integral vectors)
   inline void randi(int n);
   /// put random values in the vector (White Gaussian Noise)
   inline void setAleat();
   /// clear the vector
   inline void clear();
   /// performs soft-thresholding of the vector
   inline void softThrshold(const T nu);
   inline void fastSoftThrshold(const T nu);
   inline void fastSoftThrshold(Vector<T>& out, const T nu) const;
   inline void softThrsholdScal(Vector<T>& out, const T nu, const T s);
   inline void hardThrshold(const T nu);
   /// clipping / thresholding helpers against nu
   inline void thrsmax(const T nu);
   inline void thrsmin(const T nu);
   inline void thrsabsmin(const T nu);
   /// zero the entries smaller than nu
   inline void thrshold(const T nu);
   /// keep only the positive part of the entries
   inline void thrsPos();
   /// set each value of the vector to val
   inline void set(const T val);
   inline void setn(const INTM n) { _n = n; }; //DANGEROUS
   inline bool alltrue() const;
   inline bool allfalse() const;

   /// Algebric operations
   /// returns ||A||_2
   inline T nrm2() const;
   /// returns ||A||_2^2
   inline T nrm2sq() const;
   /// returns A'x
   inline T dot(const Vector<T>& x) const;
   /// returns A'x, when x is sparse
   template <typename I>
      inline T dot(const SpVector<T,I>& x) const;
   /// A <- A + a*x
   inline void add(const Vector<T>& x, const T a = 1.0);
   /// A <- A + a*x, when x is sparse
   template <typename I>
      inline void add(const SpVector<T,I>& x, const T a = 1.0);
   /// adds a to each value in the vector
   inline void add(const T a);
   /// A <- b*A + a*x
   inline void add_scal(const Vector<T>& x, const T a = 1.0, const T b = 0);
   /// A <- b*A + a*x, when x is sparse
   template <typename I>
      inline void add_scal(const SpVector<T,I>& x, const T a = 1.0, const T b = 0);
   /// A <- A - x
   inline void sub(const Vector<T>& x);
   /// A <- A - x, when x is sparse
   template <typename I>
      inline void sub(const SpVector<T,I>& x);
   /// A <- A ./ x
   inline void div(const Vector<T>& x);
   /// A <- x ./ y
   inline void div(const Vector<T>& x, const Vector<T>& y);
   /// A <- x .^ 2
   inline void sqr(const Vector<T>& x);
   /// in-place square (see implementation; the historical comments here were shuffled)
   inline void sqr();
   /// A <- sqrt(x)
   inline void Sqrt(const Vector<T>& x);
   /// in-place square root
   inline void Sqrt();
   /// A <- 1 ./ sqrt(x)
   inline void Invsqrt(const Vector<T>& x);
   /// in-place inverse square root
   inline void Invsqrt();
   /// A <- 1./x
   inline void inv(const Vector<T>& x);
   /// A <- 1./A
   inline void inv();
   /// A <- x .* y
   inline void mult(const Vector<T>& x, const Vector<T>& y);
   inline void mult_elementWise(const Vector<T>& B, Vector<T>& C) const { C.mult(*this,B); };
   /// normalize the vector to unit l2 norm
   inline void normalize();
   /// scale down to l2 norm thrs when the norm exceeds it
   inline void normalize2(const T thrs = 1.0);
   /// whiten
   inline void whiten(Vector<T>& mean, const bool pattern = false);
   /// whiten
   inline void whiten(Vector<T>& mean, const Vector<T>& mask);
   /// whiten
   inline void whiten(const INTM V);
   /// mean of the entries
   inline T mean() const;
   /// mean of the entry magnitudes
   inline T abs_mean() const;
   /// weighted mean with weights qi
   inline T mean_non_uniform(const Vector<T>& qi) const;
   /// standard deviation
   inline T std();
   /// compute the Kuhlback-Leiber divergence
   inline T KL(const Vector<T>& X);
   /// undo whitening
   inline void unwhiten(Vector<T>& mean, const bool pattern = false);
   /// scale the vector by a
   inline void scal(const T a);
   /// A <- -A
   inline void neg();
   /// replace each value by its exponential
   inline void exp();
   /// replace each value by its logarithm
   inline void log();
   /// replace each value by its absolute value
   inline void abs_vec();
   /// replace each value by log(1+exp(value)) (see implementation)
   inline void logexp();
   inline T softmax(const int y);
   inline T logsumexp();
   /// computes the sum of the magnitudes of the vector
   inline T asum() const;
   /// number of zero entries (name-based; body not visible here)
   inline T lzero() const;
   /// compute the sum of the differences
   inline T afused() const;
   /// returns the sum of the vector
   inline T sum() const;
   /// puts in signs, the sign of each point in the vector
   inline void sign(Vector<T>& signs) const;
   /// projects the vector onto the l1 ball of radius thrs,
   /// returns true if the returned vector is null
   inline void l1project(Vector<T>& out, const T thrs, const bool simplex = false) const;
   inline void l1project_weighted(Vector<T>& out, const Vector<T>& weights, const T thrs, const bool residual = false) const;
   inline void l1l2projectb(Vector<T>& out, const T thrs, const T gamma, const bool pos = false,
         const int mode = 1);
   inline void sparseProject(Vector<T>& out, const T thrs, const int mode = 1, const T lambda1 = 0,
         const T lambda2 = 0, const T lambda3 = 0, const bool pos = false);
   inline void project_sft(const Vector<int>& labels, const int clas);
   inline void project_sft_binary(const Vector<T>& labels);
   /// projects the vector onto the elastic-net ball (l1 + gamma*l2^2 <= thrs)
   inline void l1l2project(Vector<T>& out, const T thrs, const T gamma, const bool pos = false) const;
   inline void fusedProject(Vector<T>& out, const T lambda1, const T lambda2, const int itermax);
   inline void fusedProjectHomotopy(Vector<T>& out, const T lambda1,const T lambda2,const T lambda3 = 0,
         const bool penalty = true);
   /// sort the vector into out
   inline void sort(Vector<T>& out, const bool mode) const;
   /// sort the vector in place
   inline void sort(const bool mode);
   /// sort the vector into out, returning the permutation in key
   inline void sort2(Vector<T>& out, Vector<INTM>& key, const bool mode) const;
   /// sort the vector in place, returning the permutation in key
   inline void sort2(Vector<INTM>& key, const bool mode);
   inline void applyBayerPattern(const int offset);

   /// Conversion
   /// make a sparse copy
   inline void toSparse(SpVector<T>& vec) const;
   /// extract the entries of the vector corresponding to a binary mask
   inline void copyMask(Vector<T>& out, Vector<bool>& mask) const;
   inline void getIndices(Vector<int>& ind) const { }; // irrelevant for dense vectors
   template <typename I>
      inline void refIndices(Vector<I>& ind) const { }; // irrelevant for dense vectors

   private:
   /// = operator,
   Vector<T>& operator=(const Vector<T>& vec);

   /// if the data has been externally allocated
   bool _externAlloc;
   /// data
   T* _X;
   /// size of the vector
   INTM _n;
};

/// Sparse Matrix class, CSC format
template<typename T, typename I> class SpMatrix {
   friend class Matrix<T>;
   friend class SpVector<T,I>;
   public:
   typedef T value_type;
   typedef SpVector<T,I> col_type;
   typedef I index_type;
   /// Constructor, CSC format, existing data
   SpMatrix(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax);
   /// Constructor, new m x n matrix, with at most nzmax non-zeros values
   SpMatrix(I m, I n, I nzmax);
   /// Empty constructor
   SpMatrix();
   /// Destructor
   ~SpMatrix();

   /// Accessors
   /// reference the column i Io vec
   inline void refCol(I i, SpVector<T,I>& vec) const;
   /// returns pB[i]
   inline I pB(const I i) const { return _pB[i]; };
   /// returns r[i]
   inline I r(const I i) const { return _r[i]; };
   /// returns v[i]
   inline T v(const I i) const { return _v[i]; };
   /// returns the maximum number of non-zero elements
   inline I nzmax() const { return _nzmax; };
   /// returns the number of columns
   inline I n() const { return _n; };
   /// returns the number of rows
   inline I m() const { return _m; };
   /// Accessor
   inline I V() const { return 1; };
   /// returns X[index]
   inline T operator[](const I index) const;
   void getData(Vector<T>& data, const I index) const;
   void setData(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax);
   /// print the sparse matrix
   inline void print(const string& name) const;
   /// compute the sum of the magnitude of the matrix elements
   inline T asum() const;
   /// compute ||A||_F^2
   inline T normFsq() const;
   /// Direct access to _pB
   inline I* pB() const { return _pB; };
   /// Direct access to _pE
   inline I* pE() const { return _pE; };
   /// Direct access to _r
   inline I* r() const { return _r; };
   /// Direct access to _v
   inline T* v() const { return _v; };
   /// number of nonzeros elements
   inline I nnz() const { return _pB[_n]; };
   /// these "direct" variants assume both matrices share the same sparsity pattern
   inline void add_direct(const SpMatrix<T,I>& mat, const T a);
   inline void copy_direct(const SpMatrix<T,I>& mat);
   inline T dot_direct(const SpMatrix<T,I>& mat) const;

   /// Modifiers
   /// clear the matrix
   inline void clear();
   /// resize the matrix
   inline void resize(const I m, const I n, const I nzmax);
   /// scale the matrix by a
   inline void scal(const T a) const;
   inline T abs_mean() const;

   /// Algebraic operations
   /// aat <- A*A'
   inline void AAt(Matrix<T>& aat) const;
   /// aat <- A(:,indices)*A(:,indices)'
   inline void AAt(Matrix<T>& aat, const Vector<I>& indices) const;
   /// aat <- sum_i w_i A(:,i)*A(:,i)'
   inline void wAAt(const Vector<T>& w, Matrix<T>& aat) const;
   /// XAt <- X*A'
   inline void XAt(const Matrix<T>& X, Matrix<T>& XAt) const;
   /// XAt <- X(:,indices)*A(:,indices)'
   inline void XAt(const Matrix<T>& X, Matrix<T>& XAt,
         const Vector<I>& indices) const;
   /// XAt <- sum_i w_i X(:,i)*A(:,i)'
   inline void wXAt( const Vector<T>& w, const Matrix<T>& X,
         Matrix<T>& XAt, const int numthreads=-1) const;
   inline void XtX(Matrix<T>& XtX) const;
   /// y <- A'*x
   inline void multTrans(const Vector<T>& x, Vector<T>& y,
         const T alpha = 1.0, const T beta = 0.0) const;
   inline void multTrans(const SpVector<T,I>& x, Vector<T>& y,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b, when x is sparse
   inline void mult(const SpVector<T,I>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b
   inline void mult(const Vector<T>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform C = a*A*B + b*C, possibly transposing A or B.
   inline void mult(const Matrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*B*A + b*C, possibly transposing A or B.
   inline void multSwitch(const Matrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*A*B + b*C, when B is sparse
   inline void mult(const SpMatrix<T,I>& B, Matrix<T>& C, const bool transA = false,
         const bool transB = false, const T a = 1.0,
         const T b = 0.0) const;
   /// make a copy of the matrix mat in the current matrix
   inline void copyTo(Matrix<T>& mat) const { this->toFull(mat); };
   /// dot product;
   inline T dot(const Matrix<T>& x) const;
   inline void copyRow(const I i, Vector<T>& x) const;
   inline void sum_cols(Vector<T>& sum) const;
   inline void copy(const SpMatrix<T,I>& mat);

   /// Conversions
   /// copy the sparse matrix into a dense matrix
   inline void toFull(Matrix<T>& matrix) const;
   /// copy the sparse matrix into a dense transposed matrix
   inline void toFullTrans(Matrix<T>& matrix) const;
   /// use the data from v, r for _v, _r
   inline void convert(const Matrix<T>&v, const Matrix<I>& r, const I K);
   /// use the data from v, r for _v, _r
   inline void convert2(const Matrix<T>&v, const Vector<I>& r, const I K);
   inline void normalize();
   inline void normalize_rows();
   /// returns the l2 norms ^2 of the columns
   inline void norm_2sq_cols(Vector<T>& norms) const;
   /// returns the l0 norms of the columns
   inline void norm_0_cols(Vector<T>& norms) const;
   /// returns the l1 norms of the columns
   inline void norm_1_cols(Vector<T>& norms) const;
   inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
   inline void addVecToColsWeighted(const Vector<T>& diag, const T* weights, const T a = 1.0);
   typedef SpVector<T,I> col;
   static const bool is_sparse = true;

   private:
   /// forbid copy constructor
   explicit SpMatrix(const SpMatrix<T,I>& matrix);
   SpMatrix<T,I>& operator=(const SpMatrix<T,I>& matrix);

   /// if the data has been externally allocated
   bool _externAlloc;
   /// data
   T* _v;
   /// row indices
   I* _r;
   /// indices of the beginning of columns
   I* _pB;
   /// indices of the end of columns
   I* _pE;
   /// number of rows
   I _m;
   /// number of columns
   I _n;
   /// number of non-zero values
   I _nzmax;
};

/// Sparse vector class
template <typename T, typename I> class SpVector {
   friend class Matrix<T>;
   friend class SpMatrix<T,I>;
   friend class Vector<T>;
   public:
   typedef T value_type;
   /// Constructor, of the sparse vector of size L.
   SpVector(T* v, I* r, I L, I nzmax);
   /// Constructor, allocates nzmax slots
   SpVector(I nzmax);
   /// Empty constructor
   SpVector();
   /// Destructor
   ~SpVector();

   /// Accessors
   /// returns the maximum number of nonzeros (NOTE(review): returned as T, not I — historical quirk)
   inline T nzmax() const { return _nzmax; };
   /// returns the length of the vector (NOTE(review): returned as T, not I — historical quirk)
   inline T length() const { return _L; };
   /// computes the sum of the magnitude of the elements
   inline T asum() const;
   /// computes the l2 norm ^2 of the vector
   inline T nrm2sq() const;
   /// computes the l2 norm of the vector
   inline T nrm2() const;
   /// computes the linf norm of the vector
   inline T fmaxval() const;
   /// print the vector to std::cerr
   inline void print(const string& name) const;
   /// creates a reference on the index table
   inline void refIndices(Vector<I>& indices) const;
   /// creates a reference on the vector val
   inline void refVal(Vector<T>& val) const;
   /// access table r
   inline I r(const I i) const { return _r[i]; };
   /// access table v
   inline T v(const I i) const { return _v[i]; };
   inline T* rawX() const { return _v; };
   inline I* rawR() const { return _r; };
   ///
   inline I L() const { return _L; };
   ///
   inline void setL(const I L) { _L=L; };
   /// a <- a.^2
   inline void sqr();
   /// dot product
   inline T dot(const SpVector<T,I>& vec) const;
   /// dot product
   inline T dot(const Vector<T>& vec) const;
   /// scale the values by a
   inline void scal(const T a);

   /// Modifiers
   /// clears the vector
   inline void clear();
   /// resizes the vector
   inline void resize(const I nzmax);
   /// view the vector as an m x n sparse matrix
   void inline toSpMatrix(SpMatrix<T,I>& out,
         const I m, const I n) const;
   /// copy into a dense vector
   void inline toFull(Vector<T>& out) const;
   inline void getIndices(Vector<int>& ind) const;

   private:
   /// forbids lazy copies
   explicit SpVector(const SpVector<T,I>& vector);
   SpVector<T,I>& operator=(const SpVector<T,I>& vector);

   /// external allocation
   bool _externAlloc;
   /// data
   T* _v;
   /// indices
   I* _r;
   /// length
   I _L;
   /// maximum number of nonzeros elements
   I _nzmax;
};

/// Lazily-updated dense vector: accumulates scalar recurrences of the form
/// x <- a*(x - b*z) in _stats1/_stats2 and applies them to entries on demand.
template<typename T, typename I> class LazyVector {
   public:
      LazyVector(Vector<T>& x, const Vector<T>& z, const int n) : _x(x), _z(z), _n(n+1), _p(x.n()) {
         _current_time=0;
         _dates.resize(_p);
         _dates.setZeros();
         _stats1.resize(n+1);
         _stats2.resize(n+1);
         _stats1[0]=T(1.0);
         _stats2[0]=0;
      };
      /// flush: bring every coordinate up to date and reset the clock
      void inline update() {
         for (int ii=0; ii<_p; ++ii) {
            update(ii);
         }
         _current_time=0;
         _dates.setZeros();
      };
      /// bring coordinate ind up to date using the accumulated statistics
      void inline update(const I ind) {
         const int last_time=_dates[ind];
         if (last_time != _current_time) {
            _x[ind] = (_stats1[_current_time]/_stats1[last_time])*_x[ind] +
               _stats1[_current_time]*(_stats2[_current_time]-_stats2[last_time])*_z[ind];
            _dates[ind]=_current_time;
         }
      };
      /// bring a subset of coordinates up to date
      void inline update(const Vector<I>& indices) {
         const int p = indices.n();
         for (int ii=0; ii<p; ++ii) {
            update(indices[ii]);
         }
      };
      /// record one recurrence step; flushes when the time buffer is full
      void inline add_scal(const T a, const T b) { // performs x <- a(x - b z)
         if (_current_time == _n)
            update();
         _current_time++;
         _stats2[_current_time]=_stats2[_current_time-1] + a/_stats1[_current_time-1];
         _stats1[_current_time]=_stats1[_current_time-1]*b;
         if (_stats1[_current_time] < 1e-7)
            update(); // to prevent numerical stability problems
      };
   private:
      Vector<T>& _x;
      const Vector<T>& _z;
      const int _n;
      const int _p;
      Vector<T> _stats1, _stats2;
      /// _dates[i] = time stamp at which _x[i] was last materialized
      Vector<int> _dates;
      int _current_time;
};

/// Same lazy-update scheme as LazyVector, but with two direction vectors z1, z2
/// (three statistics tracks). NOTE(review): unlike LazyVector::add_scal, the
/// divisions here use _stats1[_current_time] (post-scale) — verify intentional.
template<typename T, typename I> class DoubleLazyVector {
   public:
      DoubleLazyVector(Vector<T>& x, const Vector<T>& z1, const Vector<T>& z2, const int n) : _x(x), _z1(z1), _z2(z2), _n(n+1), _p(x.n()) {
         _current_time=0;
         _dates.resize(_p);
         _dates.setZeros();
         _stats1.resize(n+1);
         _stats2.resize(n+1);
         _stats3.resize(n+1);
         _stats1[0]=T(1.0);
         _stats2[0]=0;
         _stats3[0]=0;
      };
      /// flush: bring every coordinate up to date and reset the clock
      void inline update() {
         for (int ii=0; ii<_p; ++ii) {
            update(ii);
         }
         _current_time=0;
         _dates.setZeros();
      };
      /// bring coordinate ind up to date using the accumulated statistics
      void inline update(const I ind) {
         const int last_time=_dates[ind];
         if (last_time != _current_time) {
            _x[ind] = _stats1[_current_time]* ( _x[ind]/_stats1[last_time] +
                  (_stats2[_current_time]-_stats2[last_time])*_z1[ind] +
                  (_stats3[_current_time]-_stats3[last_time])*_z2[ind]);
            _dates[ind]=_current_time;
         }
      };
      /// bring a subset of coordinates up to date
      void inline update(const Vector<I>& indices) {
         const int p = indices.n();
         for (int ii=0; ii<p; ++ii) {
            update(indices[ii]);
         }
      };
      /// record one recurrence step; flushes when the time buffer is full
      void inline add_scal(const T a, const T b, const T c) {
         if (_current_time == _n)
            update();
         _current_time++;
         _stats1[_current_time]=_stats1[_current_time-1]*c;
         _stats2[_current_time]=_stats2[_current_time-1] + a/_stats1[_current_time];
         _stats3[_current_time]=_stats3[_current_time-1] + b/_stats1[_current_time];
         if (_stats1[_current_time] < 1e-6)
            update(); // to prevent numerical stability problems
      };
   private:
      Vector<T>& _x;
      const Vector<T>& _z1;
      const Vector<T>& _z2;
      const int _n;
      const int _p;
      Vector<T> _stats1, _stats2, _stats3;
      Vector<int> _dates;
      int _current_time;
};

/* ************************************
 * Implementation of the class Matrix
 * ************************************/

/// Constructor with existing data X of an m x n matrix
template <typename T> Matrix<T>::Matrix(T* X, INTM m, INTM n) :
   _externAlloc(true), _X(X), _m(m), _n(n) { };

/// Constructor for a new m x n matrix
template <typename T> Matrix<T>::Matrix(INTM m, INTM n) :
   _externAlloc(false), _m(m), _n(n) {
      // allocation serialized across OpenMP threads
#pragma omp critical
      {
         _X= new T[_n*_m];
      }
   };

/// Empty constructor
template <typename T> Matrix<T>::Matrix() :
   _externAlloc(false), _X(NULL), _m(0), _n(0) { };

/// Destructor
template <typename T> Matrix<T>::~Matrix() {
   clear();
};

/// Return a modifiable reference to X(i,j); storage is column-major
template <typename T> inline T& Matrix<T>::operator()(const INTM i, const INTM j) {
   return _X[j*_m+i];
};

/// Return the value X(i,j)
template <typename T> inline T Matrix<T>::operator()(const INTM i, const INTM j) const {
   return _X[j*_m+i];
};

/// Print the matrix (header to std::cerr, values via printf)
template <typename T> inline void Matrix<T>::print(const string& name) const {
   std::cerr << name << std::endl;
   std::cerr << _m << " x " << _n << std::endl;
   for (INTM i = 0; i<_m; ++i) {
      for (INTM j = 0; j<_n; ++j) {
         printf("%10.5g ",static_cast<double>(_X[j*_m+i]));
      }
      printf("\n ");
   }
   printf("\n ");
};

/// Dump the matrix to the file name (20-digit precision)
template <typename T> inline void Matrix<T>::dump(const string& name) const {
   ofstream f;
   f.open(name);
   f.precision(20);
   std::cerr << name << std::endl;
   f << _m << " x " << _n << std::endl;
   for (INTM i = 0; i<_m; ++i) {
      for (INTM j = 0; j<_n; ++j) {
         f << static_cast<double>(_X[j*_m+i]) << " ";
      }
      f << std::endl;
   }
   f << std::endl;
   f.close();
};

/// Copy the column i into x
template <typename T> inline void Matrix<T>::copyCol(const INTM i, Vector<T>& x) const {
   assert(i >= 0 && i<_n);
   x.resize(_m);
   cblas_copy<T>(_m,_X+i*_m,1,x._X,1);
};

/// Copy the row i into x (stride _m walks along a row in column-major storage)
template <typename T> inline void Matrix<T>::copyRow(const INTM i, Vector<T>& x) const {
   assert(i >= 0 && i<_m);
   x.resize(_n);
   cblas_copy<T>(_n,_X+i,_m,x._X,1);
};

/// Scale row i by s
/// NOTE(review): declared const but mutates the data through _X.
template <typename T> inline void Matrix<T>::scalRow(const INTM i, const T s) const {
   assert(i >= 0 && i<_m);
   for (int ii=0; ii<_n; ++ii)
      _X[i+ii*_m] *= s;
};

/// Copy x into row i
template <typename T> inline void Matrix<T>::copyToRow(const INTM i, const Vector<T>& x) {
   assert(i >= 0 && i<_m);
   cblas_copy<T>(_n,x._X,1,_X+i,_m);
};

/// Copy the column i into the raw buffer x
template <typename T> inline void Matrix<T>::extract_rawCol(const INTM i, T* x) const {
   assert(i >= 0 && i<_n);
   cblas_copy<T>(_m,_X+i*_m,1,x,1);
};

/// x <- x + a * column(i)
template <typename T> inline void Matrix<T>::add_rawCol(const INTM i, T* x, const T a) const {
   assert(i >= 0 && i<_n);
   cblas_axpy<T>(_m,a,_X+i*_m,1,x,1);
};

/// Copy the column i into x (alias of copyCol)
template <typename T> inline void Matrix<T>::getData(Vector<T>& x, const INTM i) const {
   this->copyCol(i,x);
};

/// Reference the column i into the vector x (no copy; x does not own the data)
template <typename T> inline void Matrix<T>::refCol(INTM i, Vector<T>& x) const {
   assert(i >= 0 && i<_n);
   x.clear();
   x._X=_X+i*_m;
   x._n=_m;
   x._externAlloc=true;
};

/// Reference the columns i to i+n into the Matrix mat (no copy)
template <typename T> inline void Matrix<T>::refSubMat(INTM i, INTM n, Matrix<T>& mat) const {
   mat.setData(_X+i*_m,_m,n);
}

/// Check wether the columns of the matrix are normalized or not
template <typename T> inline bool Matrix<T>::isNormalized() const {
   for (INTM i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (fabs(norm - 1.0) > 1e-6)
         return false;
   }
   return true;
};

/// clean a dictionary matrix: normalize, then re-randomize columns that are
/// nearly collinear with an earlier one (off-diagonal Gram entry > 0.99)
template <typename T> inline void Matrix<T>::clean() {
   this->normalize();
   Matrix<T> G;
   this->XtX(G);
   T* prG = G._X;
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = i+1; j<_n; ++j) {
         if (prG[i*_n+j] > 0.99) {
            // remove nasty column j and put random values inside
            Vector<T> col;
            this->refCol(j,col);
            col.setAleat();
            col.normalize();
         }
      }
   }
};

/// return the 1D-index of the value of greatest magnitude
template <typename T> inline INTM Matrix<T>::fmax() const {
   return cblas_iamax<T>(_n*_m,_X,1);
};

/// return the value of greatest magnitude
template <typename T> inline T Matrix<T>::fmaxval() const {
   return _X[cblas_iamax<T>(_n*_m,_X,1)];
};

/// return the 1D-index of the value of lowest magnitude
template <typename T> inline INTM Matrix<T>::fmin() const {
   return cblas_iamin<T>(_n*_m,_X,1);
};

/// extract a sub-matrix of a symmetric matrix
/// NOTE(review): indexing uses stride _n, which matches storage only when the
/// matrix is square (as a symmetric matrix is) — do not call on non-square data.
template <typename T> inline void Matrix<T>::subMatrixSym(
      const Vector<INTM>& indices, Matrix<T>& subMatrix) const {
   INTM L = indices.n();
   subMatrix.resize(L,L);
   T* out = subMatrix._X;
   INTM* rawInd = indices.rawX();
   // fill the lower triangle, then mirror it
   for (INTM i = 0; i<L; ++i)
      for (INTM j = 0; j<=i; ++j)
         out[i*L+j]=_X[rawInd[i]*_n+rawInd[j]];
   subMatrix.fillSymmetric();
};

/// Resize the matrix (no-op when the shape is unchanged; data is NOT preserved)
template <typename T> inline void Matrix<T>::resize(INTM m, INTM n, const bool set_zeros) {
   if (_n==n && _m==m)
      return;
   clear();
   _n=n;
   _m=m;
   _externAlloc=false;
#pragma omp critical
   {
      _X=new T[_n*_m];
   }
   if (set_zeros)
      setZeros();
};

/// Change the data in the matrix (external, non-owning)
template <typename T> inline void Matrix<T>::setData(T* X, INTM m, INTM n) {
   clear();
   _X=X;
   _m=m;
   _n=n;
   _externAlloc=true;
};

/// Set all the values to zero
template <typename T> inline void Matrix<T>::setZeros() {
   memset(_X,0,_n*_m*sizeof(T));
};

/// Set all the values to a scalar
template <typename T> inline void Matrix<T>::set(const T a) {
   for (INTM i = 0; i<_n*_m; ++i)
      _X[i]=a;
};

/// Clear the matrix (frees only internally-owned storage)
template <typename T> inline void Matrix<T>::clear() {
   if (!_externAlloc)
      delete[](_X);
   _n=0;
   _m=0;
   _X=NULL;
   _externAlloc=true;
};

/// Put white Gaussian noise in the matrix
template <typename T> inline void Matrix<T>::setAleat() {
   for (INTM i = 0; i<_n*_m; ++i)
      _X[i]=normalDistrib<T>();
};

/// set the matrix to the identity
template <typename T> inline void Matrix<T>::eye() {
   this->setZeros();
   for (INTM i = 0; i<MIN(_n,_m); ++i)
      _X[i*_m+i] = T(1.0);
};

/// Normalize all columns to unit l2 norm; near-zero columns are re-randomized
template <typename T> inline void Matrix<T>::normalize() {
   //T constant = 1.0/sqrt(_m);
   for (INTM i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1e-10) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      } else {
         // for (INTM j = 0; j<_m; ++j) _X[_m*i+j]=constant;
         Vector<T> d;
         this->refCol(i,d);
         d.setAleat();
         d.normalize();
      }
   }
};

/// Normalize all columns which l2 norm is greater than one.
template <typename T> inline void Matrix<T>::normalize2() {
   for (INTM i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1.0) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      }
   }
};

/// center the matrix: subtract from each column its mean
template <typename T> inline void Matrix<T>::center() {
   for (INTM i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum();
      col.add(-sum/static_cast<T>(_m));
   }
};

/// center the rows of the matrix: subtract from each row its mean
template <typename T> inline void Matrix<T>::center_rows() {
   Vector<T> mean_rows(_m);
   mean_rows.setZeros();
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         mean_rows[j] += _X[i*_m+j];
   mean_rows.scal(T(1.0)/_n);
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         _X[i*_m+j] -= mean_rows[j];
};

/// normalize the rows of the matrix to unit l2 norm.
/// FIX: the previous version squared the accumulated squared norms
/// (calling sqr() instead of taking 1/sqrt) and applied them with
/// multDiagRight, which expects a vector of size _n while the
/// accumulator has size _m, so the scaling was wrong (and silently
/// skipped whenever _m != _n). Rows with negligible norm are left
/// unchanged to avoid division by zero.
template <typename T> inline void Matrix<T>::normalize_rows() {
   Vector<T> norm_rows(_m);
   norm_rows.setZeros();
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         norm_rows[j] += _X[i*_m+j]*_X[i*_m+j];
   for (INTM j = 0; j<_m; ++j)
      norm_rows[j] = norm_rows[j] > T(1e-20) ? T(1.0)/sqrt(norm_rows[j]) : T(1.0);
   this->multDiagLeft(norm_rows);
};

/// center the matrix and keep the center values
template <typename T> inline void Matrix<T>::center(Vector<T>& centers) {
   centers.resize(_n);
   for (INTM i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum()/static_cast<T>(_m);
      centers[i]=sum;
      col.add(-sum);
   }
};

/// scale the matrix by a
template <typename T> inline void Matrix<T>::scal(const T a) {
   cblas_scal<T>(_n*_m,a,_X,1);
};

/// make a copy of the matrix mat in the current matrix
template <typename T> inline void Matrix<T>::copy(const Matrix<T>& mat) {
   if (_X != mat._X) {
      resize(mat._m,mat._n);
      //  cblas_copy<T>(_m*_n,mat._X,1,_X,1);
      memcpy(_X,mat._X,_m*_n*sizeof(T));
   }
};

/// reference the data of mat (no copy, no ownership)
template <typename T> inline void Matrix<T>::copyRef(const Matrix<T>& mat) {
   this->setData(mat.rawX(),mat.m(),mat.n());
};

/// make the matrix symmetric by copying the upper-right part
/// into the lower-left part
template <typename T> inline void Matrix<T>::fillSymmetric() {
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j =0; j<i; ++j) {
         _X[j*_m+i]=_X[i*_m+j];
      }
   }
};

/// make the matrix symmetric by copying the lower-left part
/// into the upper-right part
template <typename T> inline void Matrix<T>::fillSymmetric2() {
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j =0; j<i; ++j) {
         _X[i*_m+j]=_X[j*_m+i];
      }
   }
};

/// subtract its mean from each of the V sub-patches of every column
template <typename T> inline void Matrix<T>::whiten(const INTM V) {
   const INTM sizePatch=_m/V;
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = 0; j<V; ++j) {
         T mean = 0;
         for (INTM k = 0; k<sizePatch; ++k) {
            mean+=_X[i*_m+sizePatch*j+k];
         }
         mean /= sizePatch;
         for (INTM k = 0; k<sizePatch; ++k) {
            _X[i*_m+sizePatch*j+k]-=mean;
         }
      }
   }
};

/// remove the mean(s) from the columns and return them in mean.
/// If pattern is true, columns are interpreted as square n x n patches and
/// a 2x2 checkerboard of means is used; otherwise mean.n() sub-patches.
template <typename T> inline void Matrix<T>::whiten(Vector<T>& mean,
      const bool pattern) {
   mean.setZeros();
   if (pattern) {
      const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_m)));
      INTM count[4];
      for (INTM i = 0; i<4; ++i)
         count[i]=0;
      for (INTM i = 0; i<_n; ++i) {
         INTM offsetx=0;
         for (INTM j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            INTM offsety=0;
            for (INTM k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               mean[2*offsetx+offsety]+=_X[i*_m+j*n+k];
               count[2*offsetx+offsety]++;
            }
         }
      }
      for (INTM i = 0; i<4; ++i)
         mean[i] /= count[i];
      for (INTM i = 0; i<_n; ++i) {
         INTM offsetx=0;
         for (INTM j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            INTM offsety=0;
            for (INTM k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               _X[i*_m+j*n+k]-=mean[2*offsetx+offsety];
            }
         }
      }
   } else {
      const INTM V = mean.n();
      const INTM sizePatch=_m/V;
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<V; ++j) {
            for (INTM k = 0; k<sizePatch; ++k) {
               mean[j]+=_X[i*_m+sizePatch*j+k];
            }
         }
      }
      mean.scal(T(1.0)/(_n*sizePatch));
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<V; ++j) {
            for (INTM k = 0; k<sizePatch; ++k) {
               _X[i*_m+sizePatch*j+k]-=mean[j];
            }
         }
      }
   }
};

/// remove the mean of the masked entries of each sub-patch
template <typename T> inline void Matrix<T>::whiten(Vector<T>& mean,
      const Vector<T>& mask) {
   const INTM V = mean.n();
   const INTM sizePatch=_m/V;
   mean.setZeros();
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = 0; j<V; ++j) {
         for (INTM k = 0; k<sizePatch; ++k) {
            mean[j]+=_X[i*_m+sizePatch*j+k];
         }
      }
   }
   for (INTM i = 0; i<V; ++i)
      mean[i] /= _n*cblas_asum(sizePatch,mask._X+i*sizePatch,1);
   for (INTM i = 0; i<_n; ++i) {
      for (INTM j = 0; j<V; ++j) {
         for (INTM k = 0; k<sizePatch; ++k) {
            if (mask[sizePatch*j+k])
               _X[i*_m+sizePatch*j+k]-=mean[j];
         }
      }
   }
};

/// add back the mean(s) removed by whiten (same pattern convention)
template <typename T> inline void Matrix<T>::unwhiten(Vector<T>& mean,
      const bool pattern) {
   if (pattern) {
      const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_m)));
      for (INTM i = 0; i<_n; ++i) {
         INTM offsetx=0;
         for (INTM j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            INTM offsety=0;
            for (INTM k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               _X[i*_m+j*n+k]+=mean[2*offsetx+offsety];
            }
         }
      }
   } else {
      const INTM V = mean.n();
      const INTM sizePatch=_m/V;
      for (INTM i = 0; i<_n; ++i) {
         for (INTM j = 0; j<V; ++j) {
            for (INTM k = 0; k<sizePatch; ++k) {
               _X[i*_m+sizePatch*j+k]+=mean[j];
            }
         }
      }
   }
};

/// Transpose the current matrix and put the result in the matrix trans
template <typename T> inline void Matrix<T>::transpose(Matrix<T>& trans) const {
   trans.resize(_n,_m);
   T* out = trans._X;
   for (INTM i = 0; i<_n; ++i)
      for (INTM j = 0; j<_m; ++j)
         out[j*_n+i] = _X[i*_m+j];
};

/// A <- -A
template <typename T> inline void Matrix<T>::neg() {
   for (INTM i = 0; i<_n*_m; ++i)
      _X[i]=-_X[i];
};

/// add one to every diagonal entry
template <typename T> inline void Matrix<T>::incrDiag() {
   for (INTM i = 0; i<MIN(_n,_m); ++i) ++_X[i*_m+i];
};

/// add the vector diag to the diagonal
template <typename T> inline void Matrix<T>::addDiag(const Vector<T>& diag) {
   T* d= diag.rawX();
   for (INTM i = 0; i<MIN(_n,_m); ++i)
      _X[i*_m+i] += d[i];
};

/// add the scalar diag to the diagonal
template <typename T> inline void Matrix<T>::addDiag(const T diag) {
   for (INTM i = 0; i<MIN(_n,_m); ++i)
      _X[i*_m+i] += diag;
};

/// add cent[i] to every entry of column i
template <typename T> inline void Matrix<T>::addToCols(const Vector<T>& cent) {
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      this->refCol(i,col);
      col.add(cent[i]);
   }
};

/// add a*vec to every column
template <typename T> inline void Matrix<T>::addVecToCols(const Vector<T>& vec,
      const T a) {
   Vector<T> col;
   for (INTM i = 0; i<_n; ++i) {
      this->refCol(i,col);
      col.add(vec,a);
   }
};

/// perform a rank one approximation uv' using the power method
/// u0 is an initial guess for u (can be empty).
template <typename T> inline void Matrix<T>::svdRankOne(const Vector<T>& u0,
      Vector<T>& u, Vector<T>& v) const {
   int i;
   const int max_iter=MAX(_m,MAX(_n,200));
   const T eps=1e-10;
   u.resize(_m);
   v.resize(_n);
   T norm=u0.nrm2();
   Vector<T> up(u0);
   if (norm < EPSILON) up.setAleat();
   up.normalize();
   multTrans(up,v);
   for (i = 0; i<max_iter; ++i) {
      mult(v,u);
      norm=u.nrm2();
      u.scal(1.0/norm);
      multTrans(u,v);
      T theta=u.dot(up);
      if (i > 10 && (1 - fabs(theta)) < eps) break;
      up.copy(u);
   }
};

/// partial SVD: U receives the num leading left singular vectors and S the
/// singular values; method selects gesvd (0), syev on XX' (1) or syevr (2)
template <typename T> inline void Matrix<T>::svd2(Matrix<T>& U, Vector<T>& S,
      const int num, const int method) const {
   const INTM num_eig= (num == -1 || method <= 1) ? MIN(_m,_n) : MIN(MIN(_m,num),_n);
   S.resize(num_eig);
   U.resize(_m,num_eig);
   if (method==0) {
      // gesvd
      T* vv = NULL;
      Matrix<T> copyX;
      copyX.copy(*this);
      gesvd<T>(reduced,no,_m,_n,copyX._X,_m,S.rawX(),U.rawX(),_m,vv,1);
   } else if (method==1) {
      // syev
      if (_m == num_eig) {
         this->XXt(U);
         syev<T>(allV,lower,_m,U.rawX(),_m,S.rawX());
      } else {
         Matrix<T> XXt(_m,_m);
         this->XXt(XXt); // in fact should do XtX, but will do that later
         Vector<T> ss(_m);
         syev<T>(allV,lower,_m,XXt.rawX(),_m,ss.rawX());
         memcpy(U.rawX(),XXt.rawX()+(_m-num_eig)*_m,_m*num_eig*sizeof(T));
         memcpy(S.rawX(),ss.rawX()+_m-num_eig,num_eig*sizeof(T));
      }
      S.thrsPos();
      S.Sqrt();
   } else if (method==2) {
      // syevr
      Matrix<T> XXt(_m,_m);
      this->XXt(XXt); // in fact should do XtX, but will do that later
      if (_m == num_eig) {
         syevr(allV,rangeAll,lower,_m,XXt.rawX(),_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
      } else {
         Vector<T> ss(_m);
         syevr(allV,range,lower,_m,XXt.rawX(),_m,T(0),T(0),_m-num_eig+1,_m,ss.rawX(),U.rawX(),_m);
         memcpy(S.rawX(),ss.rawX(),num_eig*sizeof(T));
      }
      S.thrsPos();
      for (int ii=0; ii<S.n(); ++ii) S[ii]=alt_sqrt<T>(S[ii]);
      //S.Sqrt();
   }
   if (method==1 || method==2) {
      // syev/syevr return eigenvalues in ascending order: reverse them
      Vector<T> col, col2;
      Vector<T> tmpcol(_m);
      const int n=U.n();
      for (int ii=0; ii<n/2; ++ii) {
         T tmp=S[n-ii-1];
         S[n-ii-1]=S[ii];
         S[ii]=tmp;
         U.refCol(n-ii-1,col);
         U.refCol(ii,col2);
         tmpcol.copy(col);
         col.copy(col2);
         col2.copy(tmpcol);
      }
   }
}

/// full symmetric eigendecomposition via syevr; eigenvalues clipped at zero.
/// NOTE(review): syevr overwrites its input, so _X is clobbered here despite
/// the method being const — preserved upstream behavior, confirm with callers.
template <typename T> inline void Matrix<T>::SymEig(Matrix<T>& U, Vector<T>& S) const {
   S.resize(_m);
   U.resize(_m,_m);
   syevr(allV,rangeAll,lower,_m,_X,_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
   S.thrsPos();
}

/// out <- (A + lambda*I)^{-1/2} computed through the eigendecomposition
template <typename T> inline void Matrix<T>::InvsqrtMat(Matrix<T>& out,
      const T lambda) const {
   Vector<T> S;
   S.resize(_m);
   Matrix<T> U, U2;
   U.resize(_m,_m);
   syevr(allV,rangeAll,lower,_m,_X,_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
   S.thrsPos();
   //for (int ii=0; ii<_m; ++ii) S[ii]=sqrt(S[ii])/(S[ii]+lambda);
   //for (int ii=0; ii<_m; ++ii) S[ii]= S[ii] > 1e-6 ? T(1.0)/S[ii] : 0;
   for (int ii=0; ii<_m; ++ii) S[ii]= S[ii] > 1e-6 ? T(1.0)/sqrt(S[ii]+lambda) : 0;
   U2.copy(U);
   U2.multDiagRight(S);
   U2.mult(U,out,false,true);
}

/// out <- A^{1/2} computed through the eigendecomposition
template <typename T> inline void Matrix<T>::sqrtMat(Matrix<T>& out) const {
   Vector<T> S;
   S.resize(_m);
   Matrix<T> U, U2;
   U.resize(_m,_m);
   syevr(allV,rangeAll,lower,_m,_X,_m,T(0),T(0),0,0,S.rawX(),U.rawX(),_m);
   S.thrsPos();
   S.Sqrt();
   U2.copy(U);
   U2.multDiagRight(S);
   U2.mult(U,out,false,true);
}

/// compute the singular values, using the Gram matrix when the
/// matrix is strongly rectangular
template <typename T> inline void Matrix<T>::singularValues(Vector<T>& u) const {
   u.resize(MIN(_m,_n));
   if (_m > 10*_n) {
      Matrix<T> XtX;
      this->XtX(XtX);
      syev<T>(no,lower,_n,XtX.rawX(),_n,u.rawX());
      u.thrsPos();
      u.Sqrt();
   } else if (_n > 10*_m) {
      Matrix<T> XXt;
      this->XXt(XXt);
      syev<T>(no,lower,_m,XXt.rawX(),_m,u.rawX());
      u.thrsPos();
      u.Sqrt();
   } else {
      T* vu = NULL;
      T* vv = NULL;
      Matrix<T> copyX;
      copyX.copy(*this);
      gesvd<T>(no,no,_m,_n,copyX._X,_m,u.rawX(),vu,1,vv,1);
   }
};

/// thin SVD A = U*diag(S)*V, using the Gram matrix when the
/// matrix is strongly rectangular, gesvd otherwise
template <typename T> inline void Matrix<T>::svd(Matrix<T>& U, Vector<T>& S,
      Matrix<T>&V) const {
   const INTM num_eig=MIN(_m,_n);
   S.resize(num_eig);
   U.resize(_m,num_eig);
   V.resize(num_eig,_n);
   if (_m > 10*_n) {
      Matrix<T> Vt(_n,_n);
      this->XtX(Vt);
      syev<T>(allV,lower,_n,Vt.rawX(),_n,S.rawX());
      S.thrsPos();
      S.Sqrt();
      this->mult(Vt,U);
      Vt.transpose(V);
      Vector<T> inveigs;
      inveigs.copy(S);
      for (INTM i = 0; i<num_eig; ++i)
         if (S[i] > 1e-10) {
            inveigs[i]=T(1.0)/S[i];
         } else {
            inveigs[i]=T(1.0);
         }
      U.multDiagRight(inveigs);
   } else if (_n > 10*_m) {
      this->XXt(U);
      syev<T>(allV,lower,_m,U.rawX(),_m,S.rawX());
      S.thrsPos();
      S.Sqrt();
      U.mult(*this,V,true,false);
      Vector<T> inveigs;
      inveigs.copy(S);
      for (INTM i = 0; i<num_eig; ++i)
         if (S[i] > 1e-10) {
            inveigs[i]=T(1.0)/S[i];
         } else {
            inveigs[i]=T(1.0);
         }
      V.multDiagLeft(inveigs);
   } else {
      Matrix<T> copyX;
      copyX.copy(*this);
      gesvd<T>(reduced,reduced,_m,_n,copyX._X,_m,S.rawX(),U.rawX(),_m,V.rawX(),num_eig);
   }
};

/// find the eigenvector corresponding to the largest eigenvalue
/// when the current matrix is symmetric. u0 is the initial guess,
/// using two rounds of the power method (with diagonal deflation
/// between the rounds if the dominant eigenvalue is negative)
template <typename T> inline void Matrix<T>::eigLargestSymApprox(
      const Vector<T>& u0, Vector<T>& u) const {
   int i,j;
   const int max_iter=100;
   const T eps=10e-6;
   u.copy(u0);
   T norm = u.nrm2();
   T theta;
   u.scal(1.0/norm);
   Vector<T> up(u);
   Vector<T> uor(u);
   T lambda=T();
   for (j = 0; j<2;++j) {
      up.copy(u);
      for (i = 0; i<max_iter; ++i) {
         mult(up,u);
         norm = u.nrm2();
         u.scal(1.0/norm);
         theta=u.dot(up);
         if ((1 - fabs(theta)) < eps) break;
         up.copy(u);
      }
      lambda+=theta*norm;
      if (isnan(lambda)) {
         std::cerr << "eigLargestSymApprox failed" << std::endl;
         exit(1);
      }
      if (j == 1 && lambda < eps) {
         u.copy(uor);
         break;
      }
      if (theta >= 0) break;
      u.copy(uor);
      for (i = 0; i<_m; ++i) _X[i*_m+i]-=lambda;
   }
};
/// find the eigenvector corresponding to the eigenvalue with the
/// largest magnitude when the current matrix is symmetric,
/// using the power method. It returns the eigenvalue.
/// u0 is an initial guess for the eigenvector.
template <typename T> inline T Matrix<T>::eigLargestMagnSym( const Vector<T>& u0, Vector<T>& u) const { const int max_iter=1000; const T eps=10e-6; u.copy(u0); T norm = u.nrm2(); u.scal(1.0/norm); Vector<T> up(u); T lambda=T(); for (int i = 0; i<max_iter; ++i) { mult(u,up); u.copy(up); norm=u.nrm2(); if (norm > 0) u.scal(1.0/norm); if (norm == 0 || fabs(norm-lambda)/norm < eps) break; lambda=norm; } return norm; }; /// returns the value of the eigenvalue with the largest magnitude /// using the power iteration. template <typename T> inline T Matrix<T>::eigLargestMagnSym() const { const int max_iter=1000; const T eps=10e-6; Vector<T> u(_m); u.setAleat(); T norm = u.nrm2(); u.scal(1.0/norm); Vector<T> up(u); T lambda=T(); for (int i = 0; i<max_iter; ++i) { mult(u,up); u.copy(up); norm=u.nrm2(); if (fabs(norm-lambda) < eps) break; lambda=norm; u.scal(1.0/norm); } return norm; }; /// inverse the matrix when it is symmetric template <typename T> inline void Matrix<T>::invSym() { sytri<T>(upper,_n,_X,_n); this->fillSymmetric(); }; template <typename T> inline void Matrix<T>::invSymPos() { potri<T>(upper,_n,_X,_n); this->fillSymmetric(); }; /// perform b = alpha*A'x + beta*b template <typename T> inline void Matrix<T>::multTrans(const Vector<T>& x, Vector<T>& b, const T a, const T c) const { b.resize(_n); // assert(x._n == _m && b._n == _n); cblas_gemv<T>(CblasColMajor,CblasTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1); }; /// perform b = A'x, when x is sparse template <typename T> template <typename I> inline void Matrix<T>::multTrans(const SpVector<T,I>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_n); Vector<T> col; if (beta) { for (INTM i = 0; i<_n; ++i) { refCol(i,col); b._X[i] = alpha*col.dot(x); } } else { for (INTM i = 0; i<_n; ++i) { refCol(i,col); b._X[i] = beta*b._X[i]+alpha*col.dot(x); } } }; template <typename T> inline void Matrix<T>::multTrans( const Vector<T>& x, Vector<T>& b, const Vector<bool>& active) const { b.setZeros(); Vector<T> col; bool* 
pr_active=active.rawX(); for (INTM i = 0; i<_n; ++i) { if (pr_active[i]) { this->refCol(i,col); b._X[i]=col.dot(x); } } }; /// perform b = alpha*A*x+beta*b template <typename T> inline void Matrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T a, const T c) const { // assert(x._n == _n && b._n == _m); b.resize(_m); cblas_gemv<T>(CblasColMajor,CblasNoTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1); }; /// perform b = alpha*A*x+beta*b template <typename T> inline void Matrix<T>::mult_loop(const Vector<T>& x, Vector<T>& b) const { b.resize(_m); for (int ii=0; ii<_m; ++ii) { b[ii]=cblas_dot<T>(_n,x._X,1,_X+ii,_m); } }; /// perform b = alpha*A*x + beta*b, when x is sparse template <typename T> template <typename I> inline void Matrix<T>::mult(const SpVector<T,I>& x, Vector<T>& b, const T a, const T a2) const { if (!a2) { b.setZeros(); } else if (a2 != 1.0) { b.scal(a2); } if (a == 1.0) { for (INTM i = 0; i<x._L; ++i) { cblas_axpy<T>(_m,x._v[i],_X+x._r[i]*_m,1,b._X,1); } } else { for (INTM i = 0; i<x._L; ++i) { cblas_axpy<T>(_m,a*x._v[i],_X+x._r[i]*_m,1,b._X,1); } } }; /// perform C = a*A*B + b*C, possibly transposing A or B. template <typename T> inline void Matrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { CBLAS_TRANSPOSE trA,trB; INTM m,k,n; if (transA) { trA = CblasTrans; m = _n; k = _m; } else { trA= CblasNoTrans; m = _m; k = _n; } if (transB) { trB = CblasTrans; n = B._m; //assert(B._n == k); } else { trB = CblasNoTrans; n = B._n; //assert(B._m == k); } C.resize(m,n); cblas_gemm<T>(CblasColMajor,trA,trB,m,n,k,a,_X,_m,B._X,B._m, b,C._X,C._m); }; /// perform C = a*B*A + b*C, possibly transposing A or B. 
template <typename T> inline void Matrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { B.mult(*this,C,transB,transA,a,b); }; /// perform C = A*B, when B is sparse template <typename T> template <typename I> inline void Matrix<T>::mult(const SpMatrix<T,I>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { if (transA) { if (transB) { C.resize(_n,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> rowC(B.m()); Vector<T> colA; for (INTM i = 0; i<_n; ++i) { this->refCol(i,colA); B.mult(colA,rowC,a); C.addRow(i,rowC,a); } } else { C.resize(_n,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colC; SpVector<T,I> colB; for (INTM i = 0; i<B.n(); ++i) { C.refCol(i,colC); B.refCol(i,colB); this->multTrans(colB,colC,a,T(1.0)); } } } else { if (transB) { C.resize(_m,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colA; SpVector<T,I> colB; for (INTM i = 0; i<_n; ++i) { this->refCol(i,colA); B.refCol(i,colB); C.rank1Update(colA,colB,a); } } else { C.resize(_m,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colC; SpVector<T,I> colB; for (INTM i = 0; i<B.n(); ++i) { C.refCol(i,colC); B.refCol(i,colB); this->mult(colB,colC,a,T(1.0)); } } }; } /// mult by a diagonal matrix on the left template <typename T> inline void Matrix<T>::multDiagLeft(const Vector<T>& diag) { if (diag.n() != _m) return; T* d = diag.rawX(); for (INTM i = 0; i< _n; ++i) { for (INTM j = 0; j<_m; ++j) { _X[i*_m+j] *= d[j]; } } }; /// mult by a diagonal matrix on the right template <typename T> inline void Matrix<T>::multDiagRight( const Vector<T>& diag) { if (diag.n() != _n) return; T* d = diag.rawX(); for (INTM i = 0; i< _n; ++i) { for (INTM j = 0; j<_m; ++j) { _X[i*_m+j] *= d[i]; } } }; /// mult by a diagonal matrix on the right template <typename T> inline void Matrix<T>::AddMultDiagRight( const Vector<T>& diag, Matrix<T>& mat) { if (diag.n() != _n) 
return; mat.resize(_m,_n); //mat.setZeros(); T* d = diag.rawX(); for (INTM i = 0; i< _n; ++i) { cblas_axpy<T>(_m,d[i],_X+i*_m,1,mat._X+i*_m,1); } }; /// C = A .* B, elementwise multiplication template <typename T> inline void Matrix<T>::mult_elementWise( const Matrix<T>& B, Matrix<T>& C) const { assert(_n == B._n && _m == B._m); C.resize(_m,_n); vMul<T>(_n*_m,_X,B._X,C._X); }; /// C = A .* B, elementwise multiplication template <typename T> inline void Matrix<T>::div_elementWise( const Matrix<T>& B, Matrix<T>& C) const { assert(_n == B._n && _m == B._m); C.resize(_m,_n); vDiv<T>(_n*_m,_X,B._X,C._X); }; /// XtX = A'*A template <typename T> inline void Matrix<T>::XtX(Matrix<T>& xtx) const { xtx.resize(_n,_n); cblas_syrk<T>(CblasColMajor,CblasUpper,CblasTrans,_n,_m,T(1.0), _X,_m,T(),xtx._X,_n); xtx.fillSymmetric(); }; /// XXt = A*At template <typename T> inline void Matrix<T>::XXt(Matrix<T>& xxt) const { xxt.resize(_m,_m); cblas_syrk<T>(CblasColMajor,CblasUpper,CblasNoTrans,_m,_n,T(1.0), _X,_m,T(),xxt._X,_m); xxt.fillSymmetric(); }; /// XXt = A*A' where A is an upper triangular matrix template <typename T> inline void Matrix<T>::upperTriXXt(Matrix<T>& XXt, const INTM L) const { XXt.resize(L,L); for (INTM i = 0; i<L; ++i) { cblas_syr<T>(CblasColMajor,CblasUpper,i+1,T(1.0),_X+i*_m,1,XXt._X,L); } XXt.fillSymmetric(); } /// extract the diagonal template <typename T> inline void Matrix<T>::diag(Vector<T>& dv) const { INTM size_diag=MIN(_n,_m); dv.resize(size_diag); T* const d = dv.rawX(); for (INTM i = 0; i<size_diag; ++i) d[i]=_X[i*_m+i]; }; /// set the diagonal template <typename T> inline void Matrix<T>::setDiag(const Vector<T>& dv) { INTM size_diag=MIN(_n,_m); T* const d = dv.rawX(); for (INTM i = 0; i<size_diag; ++i) _X[i*_m+i]=d[i]; }; /// set the diagonal template <typename T> inline void Matrix<T>::setDiag(const T val) { INTM size_diag=MIN(_n,_m); for (INTM i = 0; i<size_diag; ++i) _X[i*_m+i]=val; }; /// each element of the matrix is replaced by its exponential 
template <typename T> inline void Matrix<T>::exp() { vExp<T>(_n*_m,_X,_X); }; /// each element of the matrix is replaced by its exponential template <typename T> inline void Matrix<T>::pow(const T a) { vPowx<T>(_n*_m,_X,a,_X); }; template <typename T> inline void Matrix<T>::sqr() { vSqr<T>(_n*_m,_X,_X); }; template <typename T> inline void Matrix<T>::Sqrt() { vSqrt<T>(_n*_m,_X,_X); }; template <typename T> inline void Matrix<T>::Invsqrt() { vInvSqrt<T>(_n*_m,_X,_X); }; /// return vec1'*A*vec2, where vec2 is sparse template <typename T> template <typename I> inline T Matrix<T>::quad(const SpVector<T,I>& vec) const { T sum = T(); INTM L = vec._L; I* r = vec._r; T* v = vec._v; for (INTM i = 0; i<L; ++i) for (INTM j = 0; j<L; ++j) sum += _X[r[i]*_m+r[j]]*v[i]*v[j]; return sum; }; template <typename T> template <typename I> inline void Matrix<T>::quad_mult(const Vector<T>& vec1, const SpVector<T,I>& vec2, Vector<T>& y, const T a, const T b) const { const INTM size_y= y.n(); const INTM nn = _n/size_y; //y.resize(size_y); //y.setZeros(); Matrix<T> tmp; for (INTM i = 0; i<size_y; ++i) { tmp.setData(_X+(i*nn)*_m,_m,nn); y[i]=b*y[i]+a*tmp.quad(vec1,vec2); } } /// return vec'*A*vec when vec is sparse template <typename T> template <typename I> inline T Matrix<T>::quad( const Vector<T>& vec1, const SpVector<T,I>& vec) const { T sum = T(); INTM L = vec._L; I* r = vec._r; T* v = vec._v; Vector<T> col; for (INTM i = 0; i<L; ++i) { this->refCol(r[i],col); sum += v[i]*col.dot(vec1); } return sum; }; /// add alpha*mat to the current matrix template <typename T> inline void Matrix<T>::add(const Matrix<T>& mat, const T alpha) { assert(mat._m == _m && mat._n == _n); cblas_axpy<T>(_n*_m,alpha,mat._X,1,_X,1); }; /// add alpha*mat to the current matrix template <typename T> inline void Matrix<T>::add_scal(const Matrix<T>& mat, const T alpha, const T beta) { assert(mat._m == _m && mat._n == _n); cblas_axpby<T>(_n*_m,alpha,mat._X,1,beta,_X,1); }; /// add alpha*mat to the current matrix 
template <typename T> inline T Matrix<T>::dot(const Matrix<T>& mat) const { assert(mat._m == _m && mat._n == _n); return cblas_dot<T>(_n*_m,mat._X,1,_X,1); }; /// add alpha to the current matrix template <typename T> inline void Matrix<T>::add(const T alpha) { for (INTM i = 0; i<_n*_m; ++i) _X[i]+=alpha; }; /// substract the matrix mat to the current matrix template <typename T> inline void Matrix<T>::sub(const Matrix<T>& mat) { vSub<T>(_n*_m,_X,mat._X,_X); }; /// compute the sum of the magnitude of the matrix values template <typename T> inline T Matrix<T>::asum() const { return cblas_asum<T>(_n*_m,_X,1); }; template <typename T> inline T Matrix<T>::sum() const { T sum=0; for (INTM i =0; i<_n*_m; ++i) sum+=_X[i]; return sum; }; /// returns the trace of the matrix template <typename T> inline T Matrix<T>::trace() const { T sum=T(); INTM m = MIN(_n,_m); for (INTM i = 0; i<m; ++i) sum += _X[i*_m+i]; return sum; }; /// return ||A||_F template <typename T> inline T Matrix<T>::normF() const { return cblas_nrm2<T>(_n*_m,_X,1); }; template <typename T> inline T Matrix<T>::mean() const { Vector<T> vec; this->toVect(vec); return vec.mean(); }; template <typename T> inline T Matrix<T>::abs_mean() const { Vector<T> vec; this->toVect(vec); return vec.abs_mean(); }; /// return ||A||_F^2 template <typename T> inline T Matrix<T>::normFsq() const { return cblas_dot<T>(_n*_m,_X,1,_X,1); }; /// return ||At||_{inf,2} template <typename T> inline T Matrix<T>::norm_inf_2_col() const { Vector<T> col; T max = -1.0; for (INTM i = 0; i<_n; ++i) { refCol(i,col); T norm_col = col.nrm2(); if (norm_col > max) max = norm_col; } return max; }; /// return ||At||_{1,2} template <typename T> inline T Matrix<T>::norm_1_2_col() const { Vector<T> col; T sum = 0.0; for (INTM i = 0; i<_n; ++i) { refCol(i,col); sum += col.nrm2(); } return sum; }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2_rows( Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); 
for (INTM i = 0; i<_n; ++i) for (INTM j = 0; j<_m; ++j) norms[j] += _X[i*_m+j]*_X[i*_m+j]; for (INTM j = 0; j<_m; ++j) norms[j]=sqrt(norms[j]); }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2sq_rows( Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (INTM i = 0; i<_n; ++i) for (INTM j = 0; j<_m; ++j) norms[j] += _X[i*_m+j]*_X[i*_m+j]; }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2_cols( Vector<T>& norms) const { norms.resize(_n); Vector<T> col; for (INTM i = 0; i<_n; ++i) { refCol(i,col); norms[i] = col.nrm2(); } }; /// returns the linf norms of the columns template <typename T> inline void Matrix<T>::norm_inf_cols(Vector<T>& norms) const { norms.resize(_n); Vector<T> col; for (INTM i = 0; i<_n; ++i) { refCol(i,col); norms[i] = col.fmaxval(); } }; /// returns the linf norms of the columns template <typename T> inline void Matrix<T>::norm_inf_rows(Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (INTM i = 0; i<_n; ++i) for (INTM j = 0; j<_m; ++j) norms[j] = MAX(abs<T>(_X[i*_m+j]),norms[j]); }; template <typename T> inline void Matrix<T>::get_sum_cols(Vector<T>& sum) const { sum.resize(_n); for (INTM i = 0; i<_n; ++i) { sum[i]=0; for (INTM j = 0; j<_m; ++j) sum[i] += (_X[i*_m+j]); } }; template <typename T> inline void Matrix<T>::dot_col(const Matrix<T>& mat, Vector<T>& dots) const { dots.resize(_n); for (INTM i = 0; i<_n; ++i) dots[i] = cblas_dot<T>(_m,_X+i*_m,1,mat._X+i*_m,1); } /// returns the linf norms of the columns template <typename T> inline void Matrix<T>::norm_l1_rows(Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (INTM i = 0; i<_n; ++i) for (INTM j = 0; j<_m; ++j) norms[j] += abs<T>(_X[i*_m+j]); }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2sq_cols( Vector<T>& norms) const { norms.resize(_n); Vector<T> col; for (INTM i = 0; i<_n; ++i) { refCol(i,col); norms[i] = 
col.nrm2sq(); } }; template <typename T> inline void Matrix<T>::sum_cols(Vector<T>& sum) const { sum.resize(_m); sum.setZeros(); Vector<T> tmp; for (INTM i = 0; i<_n; ++i) { this->refCol(i,tmp); sum.add(tmp); } }; /// Compute the mean of the columns template <typename T> inline void Matrix<T>::meanCol(Vector<T>& mean) const { Vector<T> ones(_n); ones.set(T(1.0/_n)); this->mult(ones,mean,1.0,0.0); }; /// Compute the mean of the rows template <typename T> inline void Matrix<T>::meanRow(Vector<T>& mean) const { Vector<T> ones(_m); ones.set(T(1.0/_m)); this->multTrans(ones,mean,1.0,0.0); }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::fillRow(const Vector<T>& row) { for (INTM i = 0; i<_n; ++i) { T val = row[i]; for (INTM j = 0; j<_m; ++j) { _X[i*_m+j]=val; } } }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::extractRow(const INTM j, Vector<T>& row) const { row.resize(_n); for (INTM i = 0; i<_n; ++i) { row[i]=_X[i*_m+j]; } }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::setRow(const INTM j, const Vector<T>& row) { for (INTM i = 0; i<_n; ++i) { _X[i*_m+j]=row[i]; } }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::addRow(const INTM j, const Vector<T>& row, const T a) { if (a==1.0) { for (INTM i = 0; i<_n; ++i) { _X[i*_m+j]+=row[i]; } } else { for (INTM i = 0; i<_n; ++i) { _X[i*_m+j]+=a*row[i]; } } }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::softThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.softThrshold(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::fastSoftThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.fastSoftThrshold(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void 
Matrix<T>::fastSoftThrshold(Matrix<T>& output, const T nu) const { output.resize(_m,_n,false); Vector<T> vec, vec2; toVect(vec); output.toVect(vec2); vec.fastSoftThrshold(vec2,nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::hardThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.hardThrshold(nu); }; /// perform thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsmax(const T nu) { Vector<T> vec; toVect(vec); vec.thrsmax(nu); }; /// perform thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsmin(const T nu) { Vector<T> vec; toVect(vec); vec.thrsmin(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::inv_elem() { Vector<T> vec; toVect(vec); vec.inv(); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::blockThrshold(const T nu, const INTM sizeGroup) { for (INTM i = 0; i<_n; ++i) { INTM j; for (j = 0; j<_m-sizeGroup+1; j+=sizeGroup) { T nrm=0; for (INTM k = 0; k<sizeGroup; ++k) nrm += _X[i*_m +j+k]*_X[i*_m +j+k]; nrm=sqrt(nrm); if (nrm < nu) { for (INTM k = 0; k<sizeGroup; ++k) _X[i*_m +j+k]=0; } else { T scal = (nrm-nu)/nrm; for (INTM k = 0; k<sizeGroup; ++k) _X[i*_m +j+k]*=scal; } } j -= sizeGroup; for ( ; j<_m; ++j) _X[j]=softThrs<T>(_X[j],nu); } } template <typename T> inline void Matrix<T>::sparseProject(Matrix<T>& Y, const T thrs, const int mode, const T lambda1, const T lambda2, const T lambda3, const bool pos, const int numThreads) { int NUM_THREADS=init_omp(numThreads); Vector<T>* XXT= new Vector<T>[NUM_THREADS]; for (int i = 0; i<NUM_THREADS; ++i) { XXT[i].resize(_m); } int i; #pragma omp parallel for private(i) for (i = 0; i< _n; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif Vector<T> Xi; this->refCol(i,Xi); Vector<T> Yi; 
Y.refCol(i,Yi); Vector<T>& XX = XXT[numT]; XX.copy(Xi); XX.sparseProject(Yi,thrs,mode,lambda1,lambda2,lambda3,pos); } delete[](XXT); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsPos() { Vector<T> vec; toVect(vec); vec.thrsPos(); }; /// perform A <- A + alpha*vec1*vec2' template <typename T> inline void Matrix<T>::rank1Update( const Vector<T>& vec1, const Vector<T>& vec2, const T alpha) { cblas_ger<T>(CblasColMajor,_m,_n,alpha,vec1._X,1,vec2._X,1,_X,_m); }; /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse template <typename T> template <typename I> inline void Matrix<T>::rank1Update( const SpVector<T,I>& vec1, const Vector<T>& vec2, const T alpha) { I* r = vec1._r; T* v = vec1._v; T* X2 = vec2._X; assert(vec2._n == _n); if (alpha == 1.0) { for (INTM i = 0; i<_n; ++i) { for (INTM j = 0; j<vec1._L; ++j) { _X[i*_m+r[j]] += v[j]*X2[i]; } } } else { for (INTM i = 0; i<_n; ++i) { for (INTM j = 0; j<vec1._L; ++j) { _X[i*_m+r[j]] += alpha*v[j]*X2[i]; } } } }; template <typename T> template <typename I> inline void Matrix<T>::rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b, const SpVector<T,I>& vec2, const T alpha) { const INTM nn = vec1b.n(); const INTM size_A = _n/nn; Matrix<T> tmp; for (INTM i = 0; i<nn; ++i) { tmp.setData(_X+i*size_A*_m,_m,size_A); tmp.rank1Update(vec1,vec2,alpha*vec1b[i]); } }; /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse template <typename T> template <typename I> inline void Matrix<T>::rank1Update( const SpVector<T,I>& vec1, const SpVector<T,I>& vec2, const T alpha) { I* r = vec1._r; T* v = vec1._v; T* v2 = vec2._v; I* r2 = vec2._r; if (alpha == 1.0) { for (INTM i = 0; i<vec2._L; ++i) { for (INTM j = 0; j<vec1._L; ++j) { _X[r2[i]*_m+r[j]] += v[j]*v2[i]; } } } else { for (INTM i = 0; i<vec2._L; ++i) { for (INTM j = 0; j<vec1._L; ++j) { _X[r[i]*_m+r[j]] += alpha*v[j]*v2[i]; } } } }; /// perform A <- A + alpha*vec1*vec2', when vec2 is sparse 
template <typename T> template <typename I> inline void Matrix<T>::rank1Update( const Vector<T>& vec1, const SpVector<T,I>& vec2, const T alpha) { I* r = vec2._r; T* v = vec2._v; Vector<T> Xi; for (INTM i = 0; i<vec2._L; ++i) { this->refCol(r[i],Xi); Xi.add(vec1,v[i]*alpha); } }; /// perform A <- A + alpha*vec1*vec1', when vec1 is sparse template <typename T> template <typename I> inline void Matrix<T>::rank1Update( const SpVector<T,I>& vec1, const T alpha) { I* r = vec1._r; T* v = vec1._v; if (alpha == 1.0) { for (INTM i = 0; i<vec1._L; ++i) { for (INTM j = 0; j<vec1._L; ++j) { _X[r[i]*_m+r[j]] += v[j]*v[i]; } } } else { for (INTM i = 0; i<vec1._L; ++i) { for (INTM j = 0; j<vec1._L; ++j) { _X[_m*r[i]+r[j]] += alpha*v[j]*v[i]; } } } }; /// compute x, such that b = Ax, template <typename T> inline void Matrix<T>::conjugateGradient( const Vector<T>& b, Vector<T>& x, const T tol, const int itermax) const { Vector<T> R,P,AP; R.copy(b); this->mult(x,R,T(-1.0),T(1.0)); P.copy(R); int k = 0; T normR = R.nrm2sq(); T alpha; while (normR > tol && k < itermax) { this->mult(P,AP); alpha = normR/P.dot(AP); x.add(P,alpha); R.add(AP,-alpha); T tmp = R.nrm2sq(); P.scal(tmp/normR); normR = tmp; P.add(R,T(1.0)); ++k; }; }; template <typename T> inline void Matrix<T>::drop(char* fileName) const { std::ofstream f; f.precision(12); f.flags(std::ios_base::scientific); f.open(fileName, ofstream::trunc); std::cout << "Matrix written in " << fileName << std::endl; for (INTM i = 0; i<_n; ++i) { for (INTM j = 0; j<_m; ++j) f << _X[i*_m+j] << " "; f << std::endl; } f.close(); }; /// compute a Nadaraya Watson estimator template <typename T> inline void Matrix<T>::NadarayaWatson( const Vector<INTM>& ind, const T sigma) { if (ind.n() != _n) return; init_omp(MAX_THREADS); const INTM Ngroups=ind.maxval(); INTM i; #pragma omp parallel for private(i) for (i = 1; i<=Ngroups; ++i) { Vector<INTM> indicesGroup(_n); INTM count = 0; for (INTM j = 0; j<_n; ++j) if (ind[j] == i) indicesGroup[count++]=j; 
Matrix<T> Xm(_m,count); Vector<T> col, col2; for (INTM j= 0; j<count; ++j) { this->refCol(indicesGroup[j],col); Xm.refCol(j,col2); col2.copy(col); } Vector<T> norms; Xm.norm_2sq_cols(norms); Matrix<T> weights; Xm.XtX(weights); weights.scal(T(-2.0)); Vector<T> ones(Xm.n()); ones.set(T(1.0)); weights.rank1Update(ones,norms); weights.rank1Update(norms,ones); weights.scal(-sigma); weights.exp(); Vector<T> den; weights.mult(ones,den); den.inv(); weights.multDiagRight(den); Matrix<T> num; Xm.mult(weights,num); for (INTM j= 0; j<count; ++j) { this->refCol(indicesGroup[j],col); num.refCol(j,col2); col.copy(col2); } } }; /// make a sparse copy of the current matrix template <typename T> inline void Matrix<T>::toSparse(SpMatrix<T>& out) const { out.clear(); INTM count=0; INTM* pB; #pragma omp critical { pB=new INTM[_n+1]; } INTM* pE=pB+1; for (INTM i = 0; i<_n*_m; ++i) if (_X[i] != 0) ++count; INTM* r; T* v; #pragma omp critical { r=new INTM[count]; v=new T[count]; } count=0; for (INTM i = 0; i<_n; ++i) { pB[i]=count; for (INTM j = 0; j<_m; ++j) { if (_X[i*_m+j] != 0) { v[count]=_X[i*_m+j]; r[count++]=j; } } pE[i]=count; } out._v=v; out._r=r; out._pB=pB; out._pE=pE; out._m=_m; out._n=_n; out._nzmax=count; out._externAlloc=false; }; /// make a sparse copy of the current matrix template <typename T> inline void Matrix<T>::toSparseTrans( SpMatrix<T>& out) { out.clear(); INTM count=0; INTM* pB; #pragma omp critical { pB=new INTM[_m+1]; } INTM* pE=pB+1; for (INTM i = 0; i<_n*_m; ++i) if (_X[i] != 0) ++count; INTM* r; T* v; #pragma omp critical { r=new INTM[count]; v=new T[count]; } count=0; for (INTM i = 0; i<_m; ++i) { pB[i]=count; for (INTM j = 0; j<_n; ++j) { if (_X[i+j*_m] != 0) { v[count]=_X[j*_m+i]; r[count++]=j; } } pE[i]=count; } out._v=v; out._r=r; out._pB=pB; out._pE=pE; out._m=_n; out._n=_m; out._nzmax=count; out._externAlloc=false; }; /// make a reference of the matrix to a vector vec template <typename T> inline void Matrix<T>::toVect( Vector<T>& vec) const { 
vec.clear(); vec._externAlloc=true; vec._n=_n*_m; vec._X=_X; }; /* *********************************** * Implementation of the class Vector * ***********************************/ /// Empty constructor template <typename T> Vector<T>::Vector() : _externAlloc(true), _X(NULL), _n(0) { }; /// Constructor. Create a new vector of size n template <typename T> Vector<T>::Vector(INTM n) : _externAlloc(false), _n(n) { #pragma omp critical { _X=new T[_n]; } }; /// Constructor with existing data template <typename T> Vector<T>::Vector(T* X, INTM n) : _externAlloc(true), _X(X), _n(n) { }; /// Copy constructor template <typename T> Vector<T>::Vector(const Vector<T>& vec) : _externAlloc(false), _n(vec._n) { #pragma omp critical { _X=new T[_n]; } cblas_copy<T>(_n,vec._X,1,_X,1); }; /// Destructor template <typename T> Vector<T>::~Vector() { clear(); }; /// Print the matrix to std::cout template <typename T> inline void Vector<T>::print(const string& name) const { std::cerr << name << std::endl; std::cerr << _n << std::endl; for (INTM j = 0; j<_n; ++j) { printf("%10.5g ",static_cast<double>(_X[j])); } printf("\n "); }; /// Print the matrix to std::cout template <typename T> inline void Vector<T>::dump(const string& name) const { ofstream f; f.open(name); f.precision(20); std::cerr << name << std::endl; f << _n << std::endl; for (INTM j = 0; j<_n; ++j) { f << static_cast<double>(_X[j]) << " "; } f << std::endl; f.close(); }; /// Print the vector to std::cout template <> inline void Vector<double>::print(const char* name) const { printf("%s, %d\n",name,(int)_n); for (INTM i = 0; i<_n; ++i) { printf("%g ",_X[i]); } printf("\n"); }; /// Print the vector to std::cout template <> inline void Vector<float>::print(const char* name) const { printf("%s, %d\n",name,(int)_n); for (INTM i = 0; i<_n; ++i) { printf("%g ",_X[i]); } printf("\n"); }; /// Print the vector to std::cout template <> inline void Vector<int>::print(const char* name) const { printf("%s, %d\n",name,(int)_n); for (INTM i = 
0; i<_n; ++i) { printf("%d ",_X[i]); } printf("\n"); }; /// Print the vector to std::cout template <> inline void Vector<bool>::print(const char* name) const { printf("%s, %d\n",name,(int)_n); for (INTM i = 0; i<_n; ++i) { printf("%d ",_X[i] ? 1 : 0); } printf("\n"); }; /// returns the index of the largest value template <typename T> inline INTM Vector<T>::max() const { INTM imax=0; T max=_X[0]; for (INTM j = 1; j<_n; ++j) { T cur = _X[j]; if (cur > max) { imax=j; max = cur; } } return imax; }; /// returns the index of the minimum value template <typename T> inline INTM Vector<T>::min() const { INTM imin=0; T min=_X[0]; for (INTM j = 1; j<_n; ++j) { T cur = _X[j]; if (cur < min) { imin=j; min = cur; } } return imin; }; /// returns the maximum value template <typename T> inline T Vector<T>::maxval() const { return _X[this->max()]; }; /// returns the minimum value template <typename T> inline T Vector<T>::minval() const { return _X[this->min()]; }; /// returns the maximum magnitude template <typename T> inline T Vector<T>::fmaxval() const { return fabs(_X[this->fmax()]); }; /// returns the minimum magnitude template <typename T> inline T Vector<T>::fminval() const { return fabs(_X[this->fmin()]); }; template <typename T> inline void Vector<T>::logspace(const INTM n, const T a, const T b) { T first=log10(a); T last=log10(b); T step = (last-first)/(n-1); this->resize(n); _X[0]=first; for (INTM i = 1; i<_n; ++i) _X[i]=_X[i-1]+step; for (INTM i = 0; i<_n; ++i) _X[i]=pow(T(10.0),_X[i]); } template <typename T> inline INTM Vector<T>::nnz() const { INTM sum=0; for (INTM i = 0; i<_n; ++i) if (_X[i] != T()) ++sum; return sum; }; /// generate logarithmically spaced values template <> inline void Vector<INTM>::logspace(const INTM n, const INTM a, const INTM b) { Vector<double> tmp(n); tmp.logspace(n,double(a),double(b)); this->resize(n); _X[0]=a; _X[n-1]=b; for (INTM i = 1; i<_n-1; ++i) { INTM candidate=static_cast<INTM>(floor(static_cast<double>(tmp[i]))); _X[i]= candidate > 
_X[i-1] ? candidate : _X[i-1]+1; } } /// returns the index of the value with largest magnitude template <typename T> inline INTM Vector<T>::fmax() const { return cblas_iamax<T>(_n,_X,1); }; /// returns the index of the value with smallest magnitude template <typename T> inline INTM Vector<T>::fmin() const { return cblas_iamin<T>(_n,_X,1); }; /// returns a reference to X[index] template <typename T> inline T& Vector<T>::operator[] (const INTM i) { assert(i>=0 && i<_n); return _X[i]; }; /// returns X[index] template <typename T> inline T Vector<T>::operator[] (const INTM i) const { assert(i>=0 && i<_n); return _X[i]; }; /// make a copy of x template <typename T> inline void Vector<T>::copy(const Vector<T>& x) { if (_X != x._X) { this->resize(x.n()); //cblas_copy<T>(_n,x._X,1,_X,1); memcpy(_X,x._X,_n*sizeof(T)); } }; /// make a copy of x template <typename T> inline void Vector<T>::copyRef(const Vector<T>& x) { this->setData(x.rawX(),x.n()); }; /// Set all values to zero template <typename T> inline void Vector<T>::setZeros() { memset(_X,0,_n*sizeof(T)); }; /// resize the vector template <typename T> inline void Vector<T>::resize(const INTM n, const bool set_zeros) { if (_n == n) return; clear(); #pragma omp critical { _X=new T[n]; } _n=n; _externAlloc=false; if (set_zeros) this->setZeros(); }; /// change the data of the vector template <typename T> inline void Vector<T>::setPointer(T* X, const INTM n) { clear(); _externAlloc=true; _X=X; _n=n; }; /// put a random permutation of size n (for integral vectors) template <> inline void Vector<int>::randi(int n) { for (int i = 0; i<_n; ++i) _X[i]=static_cast<int>(random() % n); }; /// put a random permutation of size n (for integral vectors) template <> inline void Vector<int>::randperm(int n) { resize(n); Vector<int> table(n); for (int i = 0; i<n; ++i) table[i]=i; int size=n; for (int i = 0; i<n; ++i) { const int ind=random() % size; _X[i]=table[ind]; table[ind]=table[size-1]; --size; } }; /// put random values in the 
vector (white Gaussian Noise) template <typename T> inline void Vector<T>::setAleat() { for (INTM i = 0; i<_n; ++i) _X[i]=normalDistrib<T>(); }; /// clear the vector template <typename T> inline void Vector<T>::clear() { if (!_externAlloc) delete[](_X); _n=0; _X=NULL; _externAlloc=true; }; /// performs soft-thresholding of the vector template <typename T> inline void Vector<T>::softThrshold(const T nu) { for (INTM i = 0; i<_n; ++i) { if (_X[i] > nu) { _X[i] -= nu; } else if (_X[i] < -nu) { _X[i] += nu; } else { _X[i] = 0; } } }; /// performs soft-thresholding of the vector template <typename T> inline void Vector<T>::fastSoftThrshold(const T nu) { //#pragma omp parallel for for (INTM i = 0; i<_n; ++i) _X[i]=fastSoftThrs(_X[i],nu); }; /// performs soft-thresholding of the vector template <typename T> inline void Vector<T>::fastSoftThrshold(Vector<T>& output, const T nu) const { output.resize(_n,false); //#pragma omp parallel for for (INTM i = 0; i<_n; ++i) output[i]=fastSoftThrs(_X[i],nu); }; /// performs soft-thresholding of the vector template <typename T> inline void Vector<T>::softThrsholdScal(Vector<T>& out, const T nu, const T s) { T* Y = out.rawX(); for (INTM i = 0; i<_n; ++i) { if (_X[i] > nu) { Y[i] = s*(_X[i]-nu); } else if (_X[i] < -nu) { Y[i] = s*(_X[i]+nu); } else { Y[i] = 0; } } }; /// performs soft-thresholding of the vector template <typename T> inline void Vector<T>::hardThrshold(const T nu) { for (INTM i = 0; i<_n; ++i) { if (!(_X[i] > nu || _X[i] < -nu)) { _X[i] = 0; } } }; /// performs thresholding of the vector template <typename T> inline void Vector<T>::thrsmax(const T nu) { //#pragma omp parallel for private(i) for (INTM i = 0; i<_n; ++i) if (_X[i] < nu) _X[i]=nu; } /// performs thresholding of the vector template <typename T> inline void Vector<T>::thrsmin(const T nu) { for (INTM i = 0; i<_n; ++i) _X[i]=MIN(_X[i],nu); } /// performs thresholding of the vector template <typename T> inline void Vector<T>::thrsabsmin(const T nu) { for (INTM i = 
0; i<_n; ++i) _X[i]=MAX(MIN(_X[i],nu),-nu); } /// performs thresholding of the vector template <typename T> inline void Vector<T>::thrshold(const T nu) { for (INTM i = 0; i<_n; ++i) if (abs<T>(_X[i]) < nu) _X[i]=0; } /// performs soft-thresholding of the vector template <typename T> inline void Vector<T>::thrsPos() { for (INTM i = 0; i<_n; ++i) { if (_X[i] < 0) _X[i]=0; } }; template <> inline bool Vector<bool>::alltrue() const { for (INTM i = 0; i<_n; ++i) { if (!_X[i]) return false; } return true; }; template <> inline bool Vector<bool>::allfalse() const { for (INTM i = 0; i<_n; ++i) { if (_X[i]) return false; } return true; }; /// set each value of the vector to val template <typename T> inline void Vector<T>::set(const T val) { for (INTM i = 0; i<_n; ++i) _X[i]=val; }; /// returns ||A||_2 template <typename T> inline T Vector<T>::nrm2() const { return cblas_nrm2<T>(_n,_X,1); }; /// returns ||A||_2^2 template <typename T> inline T Vector<T>::nrm2sq() const { return cblas_dot<T>(_n,_X,1,_X,1); }; /// returns A'x template <typename T> inline T Vector<T>::dot(const Vector<T>& x) const { assert(_n == x._n); return cblas_dot<T>(_n,_X,1,x._X,1); }; /// returns A'x, when x is sparse template <typename T> template <typename I> inline T Vector<T>::dot(const SpVector<T,I>& x) const { T sum=0; const I* r = x.rawR(); const T* v = x.rawX(); for (INTT i = 0; i<x._L; ++i) { sum += _X[r[i]]*v[i]; } return sum; //return cblas_doti<T>(x._L,x._v,x._r,_X); }; /// A <- A + a*x template <typename T> inline void Vector<T>::add(const Vector<T>& x, const T a) { assert(_n == x._n); cblas_axpy<T>(_n,a,x._X,1,_X,1); }; template <typename T> inline void Vector<T>::add_scal(const Vector<T>& x, const T a, const T b) { assert(_n == x._n); cblas_axpby<T>(_n,a,x._X,1,b,_X,1); }; /// A <- A + a*x template <typename T> template <typename I> inline void Vector<T>::add(const SpVector<T,I>& x, const T a) { if (a == 1.0) { for (INTM i = 0; i<x._L; ++i) _X[x._r[i]]+=x._v[i]; } else { for (INTM i = 0; 
i<x._L; ++i) _X[x._r[i]]+=a*x._v[i]; } }; /// A <- A + a*x template <typename T> template <typename I> inline void Vector<T>::add_scal(const SpVector<T,I>& x, const T a, const T b) { if (b != T(1.0)) { if (b==0) { this->setZeros(); } else { this->scal(b); } } if (a == T(1.0)) { for (I i = 0; i<x._L; ++i) _X[x._r[i]]+=x._v[i]; } else { for (I i = 0; i<x._L; ++i) _X[x._r[i]]+=a*x._v[i]; } }; /// adds a to each value in the vector template <typename T> inline void Vector<T>::add(const T a) { for (INTM i = 0; i<_n; ++i) _X[i]+=a; }; /// A <- A - x template <typename T> inline void Vector<T>::sub(const Vector<T>& x) { assert(_n == x._n); vSub<T>(_n,_X,x._X,_X); }; /// A <- A + a*x template <typename T> template <typename I> inline void Vector<T>::sub(const SpVector<T,I>& x) { for (INTM i = 0; i<x._L; ++i) _X[x._r[i]]-=x._v[i]; }; /// A <- A ./ x template <typename T> inline void Vector<T>::div(const Vector<T>& x) { assert(_n == x._n); vDiv<T>(_n,_X,x._X,_X); }; /// A <- x ./ y template <typename T> inline void Vector<T>::div(const Vector<T>& x, const Vector<T>& y) { assert(_n == x._n); vDiv<T>(_n,x._X,y._X,_X); }; /// A <- x .^ 2 template <typename T> inline void Vector<T>::sqr(const Vector<T>& x) { this->resize(x._n); vSqr<T>(_n,x._X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::sqr() { vSqr<T>(_n,_X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Invsqrt(const Vector<T>& x) { this->resize(x._n); vInvSqrt<T>(_n,x._X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Sqrt(const Vector<T>& x) { this->resize(x._n); vSqrt<T>(_n,x._X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Invsqrt() { vInvSqrt<T>(_n,_X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Sqrt() { vSqrt<T>(_n,_X,_X); } /// A <- 1./x template <typename T> inline void Vector<T>::inv(const Vector<T>& x) { this->resize(x.n()); vInv<T>(_n,x._X,_X); }; /// A <- 1./A template <typename T> inline void 
Vector<T>::inv() { vInv<T>(_n,_X,_X); }; /// A <- x .* y template <typename T> inline void Vector<T>::mult(const Vector<T>& x, const Vector<T>& y) { this->resize(x.n()); vMul<T>(_n,x._X,y._X,_X); }; ; /// normalize the vector template <typename T> inline void Vector<T>::normalize() { T norm=nrm2(); if (norm > EPSILON) scal(1.0/norm); }; /// normalize the vector template <typename T> inline void Vector<T>::normalize2(const T thrs) { T norm=nrm2(); if (norm > thrs) scal(thrs/norm); }; /// whiten template <typename T> inline void Vector<T>::whiten( Vector<T>& meanv, const bool pattern) { if (pattern) { const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_n))); INTM count[4]; for (INTM i = 0; i<4; ++i) count[i]=0; INTM offsetx=0; for (INTM j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; INTM offsety=0; for (INTM k = 0; k<n; ++k) { offsety= (offsety+1) % 2; meanv[2*offsetx+offsety]+=_X[j*n+k]; count[2*offsetx+offsety]++; } } for (INTM i = 0; i<4; ++i) meanv[i] /= count[i]; offsetx=0; for (INTM j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; INTM offsety=0; for (INTM k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[j*n+k]-=meanv[2*offsetx+offsety]; } } } else { const INTM V = meanv.n(); const INTM sizePatch=_n/V; for (INTM j = 0; j<V; ++j) { T mean = 0; for (INTM k = 0; k<sizePatch; ++k) { mean+=_X[sizePatch*j+k]; } mean /= sizePatch; for (INTM k = 0; k<sizePatch; ++k) { _X[sizePatch*j+k]-=mean; } meanv[j]=mean; } } }; /// whiten template <typename T> inline void Vector<T>::whiten( Vector<T>& meanv, const Vector<T>& mask) { const INTM V = meanv.n(); const INTM sizePatch=_n/V; for (INTM j = 0; j<V; ++j) { T mean = 0; for (INTM k = 0; k<sizePatch; ++k) { mean+=_X[sizePatch*j+k]; } mean /= cblas_asum(sizePatch,mask._X+j*sizePatch,1); for (INTM k = 0; k<sizePatch; ++k) { if (mask[sizePatch*j+k]) _X[sizePatch*j+k]-=mean; } meanv[j]=mean; } }; /// whiten template <typename T> inline void Vector<T>::whiten(const INTM V) { const INTM sizePatch=_n/V; for (INTM j = 0; j<V; ++j) { T mean 
= 0; for (INTM k = 0; k<sizePatch; ++k) { mean+=_X[sizePatch*j+k]; } mean /= sizePatch; for (INTM k = 0; k<sizePatch; ++k) { _X[sizePatch*j+k]-=mean; } } }; template <typename T> inline T Vector<T>::KL(const Vector<T>& Y) { T sum = 0; T* prY = Y.rawX(); for (INTM i = 0; i<_n; ++i) { if (_X[i] > 1e-20) { if (prY[i] < 1e-60) { sum += 1e200; } else { sum += _X[i]*log_alt<T>(_X[i]/prY[i]); } //sum += _X[i]*log_alt<T>(_X[i]/(prY[i]+1e-100)); } } sum += T(-1.0) + Y.sum(); return sum; }; /// unwhiten template <typename T> inline void Vector<T>::unwhiten( Vector<T>& meanv, const bool pattern) { if (pattern) { const INTM n =static_cast<INTM>(sqrt(static_cast<T>(_n))); INTM offsetx=0; for (INTM j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; INTM offsety=0; for (INTM k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[j*n+k]+=meanv[2*offsetx+offsety]; } } } else { const INTM V = meanv.n(); const INTM sizePatch=_n/V; for (INTM j = 0; j<V; ++j) { T mean = meanv[j]; for (INTM k = 0; k<sizePatch; ++k) { _X[sizePatch*j+k]+=mean; } } } }; /// return the mean template <typename T> inline T Vector<T>::mean() const { return this->sum()/_n; } template <typename T> inline T Vector<T>::abs_mean() const { return this->asum()/_n; }; template <typename T> inline T Vector<T>::mean_non_uniform(const Vector<T>& qi) const { Vector<T> tmp; tmp.copy(*this); tmp.mult(qi,tmp); return tmp.sum(); }; /// return the std template <typename T> inline T Vector<T>::std() { T E = this->mean(); T std=0; for (INTM i = 0; i<_n; ++i) { T tmp=_X[i]-E; std += tmp*tmp; } std /= _n; return sqr_alt<T>(std); } /// scale the vector by a template <typename T> inline void Vector<T>::scal(const T a) { return cblas_scal<T>(_n,a,_X,1); }; /// A <- -A template <typename T> inline void Vector<T>::neg() { for (INTM i = 0; i<_n; ++i) _X[i]=-_X[i]; }; /// replace each value by its exponential template <typename T> inline void Vector<T>::exp() { vExp<T>(_n,_X,_X); }; /// replace each value by its absolute value template <typename T> 
inline void Vector<T>::abs_vec() { vAbs<T>(_n,_X,_X); }; /// replace each value by its logarithm template <typename T> inline void Vector<T>::log() { for (INTM i=0; i<_n; ++i) _X[i]=alt_log<T>(_X[i]); }; /// replace each value by its exponential template <typename T> inline void Vector<T>::logexp() { for (INTM i = 0; i<_n; ++i) { _X[i]=logexp2(_X[i]); /*if (_X[i] < -30) { _X[i]=0; } else if (_X[i] < 30) { _X[i]= alt_log<T>( T(1.0) + exp_alt<T>( _X[i] ) ); }*/ } }; template <typename T> inline T Vector<T>::logsumexp() { T mm=this->maxval(); this->add(-mm); this->exp(); return mm+alt_log<T>(this->asum()); }; /// replace each value by its exponential template <typename T> inline T Vector<T>::softmax(const int y) { this->add(-_X[y]); _X[y]=-INFINITY; T max=this->maxval(); if (max > 30) { return max; } else if (max < -30) { return 0; } else { _X[y]=T(0.0); this->exp(); return alt_log<T>(this->sum()); } }; /// computes the sum of the magnitudes of the vector template <typename T> inline T Vector<T>::asum() const { return cblas_asum<T>(_n,_X,1); }; template <typename T> inline T Vector<T>::lzero() const { INTM count=0; for (INTM i = 0; i<_n; ++i) if (_X[i] != 0) ++count; return count; }; template <typename T> inline T Vector<T>::afused() const { T sum = 0; for (INTM i = 1; i<_n; ++i) { sum += abs<T>(_X[i]-_X[i-1]); } return sum; } /// returns the sum of the vector template <typename T> inline T Vector<T>::sum() const { T sum=T(); for (INTM i = 0; i<_n; ++i) sum +=_X[i]; return sum; }; /// puts in signs, the sign of each poINTM in the vector template <typename T> inline void Vector<T>::sign(Vector<T>& signs) const { T* prSign=signs.rawX(); for (INTM i = 0; i<_n; ++i) { if (_X[i] == 0) { prSign[i]=0.0; } else { prSign[i] = _X[i] > 0 ? 
1.0 : -1.0; } } }; /// projects the vector onto the l1 ball of radius thrs, /// returns true if the returned vector is null template <typename T> inline void Vector<T>::l1project(Vector<T>& out, const T thrs, const bool simplex) const { out.copy(*this); if (simplex) { out.thrsPos(); } else { vAbs<T>(_n,out._X,out._X); } T norm1 = out.sum(); if (norm1 <= thrs) { if (!simplex) out.copy(*this); return; } T* prU = out._X; INTM sizeU = _n; T sum = T(); INTM sum_card = 0; while (sizeU > 0) { // put the pivot in prU[0] swap(prU[0],prU[sizeU/2]); T pivot = prU[0]; INTM sizeG=1; T sumG=pivot; for (INTM i = 1; i<sizeU; ++i) { if (prU[i] >= pivot) { sumG += prU[i]; swap(prU[sizeG++],prU[i]); } } if (sum + sumG - pivot*(sum_card + sizeG) <= thrs) { sum_card += sizeG; sum += sumG; prU +=sizeG; sizeU -= sizeG; } else { ++prU; sizeU = sizeG-1; } } T lambda = (sum-thrs)/sum_card; out.copy(*this); if (simplex) { out.thrsPos(); } out.softThrshold(lambda); }; /// projects the vector onto the l1 ball of radius thrs, /// returns true if the returned vector is null template <typename T> inline void Vector<T>::l1project_weighted(Vector<T>& out, const Vector<T>& weights, const T thrs, const bool residual) const { out.copy(*this); if (thrs==0) { out.setZeros(); return; } vAbs<T>(_n,out._X,out._X); out.div(weights); Vector<INTM> keys(_n); for (INTM i = 0; i<_n; ++i) keys[i]=i; out.sort2(keys,false); T sum1=0; T sum2=0; T lambda=0; for (INTM i = 0; i<_n; ++i) { const T lambda_old=lambda; const T fact=weights[keys[i]]*weights[keys[i]]; lambda=out[i]; sum2 += fact; sum1 += fact*lambda; if (sum1 - lambda*sum2 >= thrs) { sum2-=fact; sum1-=fact*lambda; lambda=lambda_old; break; } } lambda=MAX(0,(sum1-thrs)/sum2); if (residual) { for (INTM i = 0; i<_n; ++i) { out._X[i]=_X[i] > 0 ? MIN(_X[i],lambda*weights[i]) : MAX(_X[i],-lambda*weights[i]); } } else { for (INTM i = 0; i<_n; ++i) { out._X[i]=_X[i] > 0 ? 
MAX(0,_X[i]-lambda*weights[i]) : MIN(0,_X[i]+lambda*weights[i]); } } }; template <typename T> inline void Vector<T>::project_sft_binary(const Vector<T>& y) { T mean = this->mean(); Vector<T> ztilde, xtilde; ztilde.resize(_n); int count=0; if (mean > 0) { for (int ii=0; ii<_n; ++ii) if (y[ii] > 0) { count++; ztilde[ii]=_X[ii]+T(1.0); } else { ztilde[ii]= _X[ii]; } ztilde.l1project(xtilde,T(count)); for (int ii=0; ii<_n; ++ii) _X[ii] = y[ii] > 0 ? xtilde[ii]-T(1.0) : xtilde[ii]; } else { for (int ii=0; ii<_n; ++ii) if (y[ii] > 0) { ztilde[ii]=-_X[ii]; } else { count++; ztilde[ii]=- _X[ii] + T(1.0); } ztilde.l1project(xtilde,T(count)); for (int ii=0; ii<_n; ++ii) _X[ii] = y[ii] > 0 ? -xtilde[ii] : -xtilde[ii]+T(1.0); } }; template <typename T> inline void Vector<T>::project_sft(const Vector<int>& labels, const int clas) { Vector<T> y(_n); for (int ii=0; ii<_n; ++ii) y[ii] = labels[ii]==clas ? T(1.0) : -T(1.0); this->project_sft_binary(y); /* T mean = this->mean(); T thrs=mean; while (abs(mean) > EPSILON) { INTM n_seuils=0; for (INTM i = 0; i< _n; ++i) { _X[i] = _X[i]-thrs; if (labels[i]==clas) { if (_X[i] < -1.0) { _X[i]=-1.0; ++n_seuils; } } else { if (_X[i] < 0) { ++n_seuils; _X[i]=0; } } } mean = this->mean(); thrs= mean * _n/(_n-n_seuils);*/ //} }; template <typename T> inline void Vector<T>::sparseProject(Vector<T>& out, const T thrs, const int mode, const T lambda1, const T lambda2, const T lambda3, const bool pos) { if (mode == 1) { /// min_u ||b-u||_2^2 / ||u||_1 <= thrs this->l1project(out,thrs,pos); } else if (mode == 2) { /// min_u ||b-u||_2^2 / ||u||_2^2 + lambda1||u||_1 <= thrs if (lambda1 > 1e-10) { this->scal(lambda1); this->l1l2project(out,thrs,2.0/(lambda1*lambda1),pos); this->scal(T(1.0/lambda1)); out.scal(T(1.0/lambda1)); } else { out.copy(*this); out.normalize2(); out.scal(sqrt(thrs)); } } else if (mode == 3) { /// min_u ||b-u||_2^2 / ||u||_1 + (lambda1/2) ||u||_2^2 <= thrs this->l1l2project(out,thrs,lambda1,pos); } else if (mode == 4) { /// min_u 
0.5||b-u||_2^2 + lambda1||u||_1 / ||u||_2^2 <= thrs out.copy(*this); if (pos) out.thrsPos(); out.softThrshold(lambda1); T nrm=out.nrm2sq(); if (nrm > thrs) out.scal(sqr_alt<T>(thrs/nrm)); } else if (mode == 5) { /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) / ||u||_2^2 <= thrs // this->fusedProject(out,lambda1,lambda2,100); // T nrm=out.nrm2sq(); // if (nrm > thrs) // out.scal(sqr_alt<T>(thrs/nrm)); // } else if (mode == 6) { /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) +0.5lambda_3 ||u||_2^2 this->fusedProjectHomotopy(out,lambda1,lambda2,lambda3,true); } else if (mode==6) { /// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= thrs this->fusedProjectHomotopy(out,lambda1/thrs,lambda2/thrs,lambda3/thrs,false); } else { /// min_u ||b-u||_2^2 / (1-lambda1)*||u||_2^2 + lambda1||u||_1 <= thrs if (lambda1 < 1e-10) { out.copy(*this); if (pos) out.thrsPos(); out.normalize2(); out.scal(sqrt(thrs)); } else if (lambda1 > 0.999999) { this->l1project(out,thrs,pos); } else { this->sparseProject(out,thrs/(1.0-lambda1),2,lambda1/(1-lambda1),0,0,pos); } } }; /// returns true if the returned vector is null template <typename T> inline void Vector<T>::l1l2projectb(Vector<T>& out, const T thrs, const T gamma, const bool pos, const int mode) { if (mode == 1) { /// min_u ||b-u||_2^2 / ||u||_2^2 + gamma ||u||_1 <= thrs this->scal(gamma); this->l1l2project(out,thrs,2.0/(gamma*gamma),pos); this->scal(T(1.0/gamma)); out.scal(T(1.0/gamma)); } else if (mode == 2) { /// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs this->l1l2project(out,thrs,gamma,pos); } else if (mode == 3) { /// min_u 0.5||b-u||_2^2 + gamma||u||_1 / ||u||_2^2 <= thrs out.copy(*this); if (pos) out.thrsPos(); out.softThrshold(gamma); T nrm=out.nrm2(); if (nrm > thrs) out.scal(thrs/nrm); } } /// returns true if the returned vector is null /// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs template <typename T> inline void 
Vector<T>::l1l2project(Vector<T>& out, const T thrs, const T gamma, const bool pos) const { if (gamma == 0) return this->l1project(out,thrs,pos); out.copy(*this); if (pos) { out.thrsPos(); } else { vAbs<T>(_n,out._X,out._X); } T norm = out.sum() + gamma*out.nrm2sq(); if (norm <= thrs) { if (!pos) out.copy(*this); return; } /// BEGIN T* prU = out._X; INTM sizeU = _n; T sum = 0; INTM sum_card = 0; while (sizeU > 0) { // put the pivot in prU[0] swap(prU[0],prU[sizeU/2]); T pivot = prU[0]; INTM sizeG=1; T sumG=pivot+0.5*gamma*pivot*pivot; for (INTM i = 1; i<sizeU; ++i) { if (prU[i] >= pivot) { sumG += prU[i]+0.5*gamma*prU[i]*prU[i]; swap(prU[sizeG++],prU[i]); } } if (sum + sumG - pivot*(1+0.5*gamma*pivot)*(sum_card + sizeG) < thrs*(1+gamma*pivot)*(1+gamma*pivot)) { sum_card += sizeG; sum += sumG; prU +=sizeG; sizeU -= sizeG; } else { ++prU; sizeU = sizeG-1; } } T a = gamma*gamma*thrs+0.5*gamma*sum_card; T b = 2*gamma*thrs+sum_card; T c=thrs-sum; T delta = b*b-4*a*c; T lambda = (-b+sqrt(delta))/(2*a); out.copy(*this); if (pos) { out.thrsPos(); } out.fastSoftThrshold(lambda); out.scal(T(1.0/(1+lambda*gamma))); }; template <typename T> static inline T fusedHomotopyAux(const bool& sign1, const bool& sign2, const bool& sign3, const T& c1, const T& c2) { if (sign1) { if (sign2) { return sign3 ? 0 : c2; } else { return sign3 ? -c2-c1 : -c1; } } else { if (sign2) { return sign3 ? c1 : c1+c2; } else { return sign3 ? 
-c2 : 0; } } }; template <typename T> inline void Vector<T>::fusedProjectHomotopy(Vector<T>& alpha, const T lambda1,const T lambda2,const T lambda3, const bool penalty) { T* pr_DtR=_X; const INTM K = _n; alpha.setZeros(); Vector<T> u(K); // regularization path for gamma Vector<T> Du(K); // regularization path for alpha Vector<T> DDu(K); // regularization path for alpha Vector<T> gamma(K); // auxiliary variable Vector<T> c(K); // auxiliary variables Vector<T> scores(K); // auxiliary variables gamma.setZeros(); T* pr_gamma = gamma.rawX(); T* pr_u = u.rawX(); T* pr_Du = Du.rawX(); T* pr_DDu = DDu.rawX(); T* pr_c = c.rawX(); T* pr_scores = scores.rawX(); Vector<INTM> ind(K+1); Vector<bool> signs(K); ind.set(K); INTM* pr_ind = ind.rawX(); bool* pr_signs = signs.rawX(); /// Computation of DtR T sumBeta = this->sum(); /// first element is selected, gamma and alpha are updated pr_gamma[0]=sumBeta/K; /// update alpha alpha.set(pr_gamma[0]); /// update DtR this->sub(alpha); for (INTM j = K-2; j>=0; --j) pr_DtR[j] += pr_DtR[j+1]; pr_DtR[0]=0; pr_ind[0]=0; pr_signs[0] = pr_DtR[0] > 0; pr_c[0]=T(1.0)/K; INTM currentInd=this->fmax(); T currentLambda=abs<T>(pr_DtR[currentInd]); bool newAtom = true; /// Solve the Lasso using simplified LARS for (INTM i = 1; i<K; ++i) { /// exit if constraINTMs are satisfied /// min_u ||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 if (penalty && currentLambda <= lambda2) break; if (!penalty) { /// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= 1.0 scores.copy(alpha); scores.softThrshold(lambda1*currentLambda/lambda2); scores.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2))); if (lambda1*scores.asum()+lambda2*scores.afused()+0.5* lambda3*scores.nrm2sq() >= T(1.0)) break; } /// Update pr_ind and pr_c if (newAtom) { INTM j; for (j = 1; j<i; ++j) if (pr_ind[j] > currentInd) break; for (INTM k = i; k>j; --k) { pr_ind[k]=pr_ind[k-1]; pr_c[k]=pr_c[k-1]; pr_signs[k]=pr_signs[k-1]; } pr_ind[j]=currentInd; 
pr_signs[j]=pr_DtR[currentInd] > 0; pr_c[j-1]=T(1.0)/(pr_ind[j]-pr_ind[j-1]); pr_c[j]=T(1.0)/(pr_ind[j+1]-pr_ind[j]); } // Compute u pr_u[0]= pr_signs[1] ? -pr_c[0] : pr_c[0]; if (i == 1) { pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1]; } else { pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1]; pr_u[1]+=pr_signs[2] ? -pr_c[1] : pr_c[1]; for (INTM j = 2; j<i; ++j) { pr_u[j]=2*fusedHomotopyAux<T>(pr_signs[j-1], pr_signs[j],pr_signs[j+1], pr_c[j-1],pr_c[j]); } pr_u[i] = pr_signs[i-1] ? -pr_c[i-1] : pr_c[i-1]; pr_u[i] += pr_signs[i] ? pr_c[i-1]+pr_c[i] : -pr_c[i-1]-pr_c[i]; } // Compute Du pr_Du[0]=pr_u[0]; for (INTM k = 1; k<pr_ind[1]; ++k) pr_Du[k]=pr_Du[0]; for (INTM j = 1; j<=i; ++j) { pr_Du[pr_ind[j]]=pr_Du[pr_ind[j]-1]+pr_u[j]; for (INTM k = pr_ind[j]+1; k<pr_ind[j+1]; ++k) pr_Du[k]=pr_Du[pr_ind[j]]; } /// Compute DDu DDu.copy(Du); for (INTM j = K-2; j>=0; --j) pr_DDu[j] += pr_DDu[j+1]; /// Check constraINTMs T max_step1 = INFINITY; if (penalty) { max_step1 = currentLambda-lambda2; } /// Check changes of sign T max_step2 = INFINITY; INTM step_out = -1; for (INTM j = 1; j<=i; ++j) { T ratio = -pr_gamma[pr_ind[j]]/pr_u[j]; if (ratio > 0 && ratio <= max_step2) { max_step2=ratio; step_out=j; } } T max_step3 = INFINITY; /// Check new variables entering the active set for (INTM j = 1; j<K; ++j) { T sc1 = (currentLambda-pr_DtR[j])/(T(1.0)-pr_DDu[j]); T sc2 = (currentLambda+pr_DtR[j])/(T(1.0)+pr_DDu[j]); if (sc1 <= 1e-10) sc1=INFINITY; if (sc2 <= 1e-10) sc2=INFINITY; pr_scores[j]= MIN(sc1,sc2); } for (INTM j = 0; j<=i; ++j) { pr_scores[pr_ind[j]]=INFINITY; } currentInd = scores.fmin(); max_step3 = pr_scores[currentInd]; T step = MIN(max_step1,MIN(max_step3,max_step2)); if (step == 0 || step == INFINITY) break; /// Update gamma, alpha, DtR, currentLambda for (INTM j = 0; j<=i; ++j) { pr_gamma[pr_ind[j]]+=step*pr_u[j]; } alpha.add(Du,step); this->add(DDu,-step); currentLambda -= step; if (step == max_step2) { /// Update signs,pr_ind, pr_c for (INTM k = 
step_out; k<=i; ++k) pr_ind[k]=pr_ind[k+1]; pr_ind[i]=K; for (INTM k = step_out; k<=i; ++k) pr_signs[k]=pr_signs[k+1]; pr_c[step_out-1]=T(1.0)/(pr_ind[step_out]-pr_ind[step_out-1]); pr_c[step_out]=T(1.0)/(pr_ind[step_out+1]-pr_ind[step_out]); i-=2; newAtom=false; } else { newAtom=true; } } if (penalty) { alpha.softThrshold(lambda1); alpha.scal(T(1.0/(1.0+lambda3))); } else { alpha.softThrshold(lambda1*currentLambda/lambda2); alpha.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2))); } }; template <typename T> inline void Vector<T>::fusedProject(Vector<T>& alpha, const T lambda1, const T lambda2, const int itermax) { T* pr_alpha= alpha.rawX(); T* pr_beta=_X; const INTM K = alpha.n(); T total_alpha =alpha.sum(); /// Modification of beta for (INTM i = K-2; i>=0; --i) pr_beta[i]+=pr_beta[i+1]; for (INTM i = 0; i<itermax; ++i) { T sum_alpha=0; T sum_diff = 0; /// Update first coordinate T gamma_old=pr_alpha[0]; pr_alpha[0]=(K*gamma_old+pr_beta[0]- total_alpha)/K; T diff = pr_alpha[0]-gamma_old; sum_diff += diff; sum_alpha += pr_alpha[0]; total_alpha +=K*diff; /// Update alpha_j for (INTM j = 1; j<K; ++j) { pr_alpha[j]+=sum_diff; T gamma_old=pr_alpha[j]-pr_alpha[j-1]; T gamma_new=softThrs((K-j)*gamma_old+pr_beta[j]- (total_alpha-sum_alpha),lambda2)/(K-j); pr_alpha[j]=pr_alpha[j-1]+gamma_new; T diff = gamma_new-gamma_old; sum_diff += diff; sum_alpha+=pr_alpha[j]; total_alpha +=(K-j)*diff; } } alpha.softThrshold(lambda1); }; /// sort the vector template <typename T> inline void Vector<T>::sort(const bool mode) { if (mode) { lasrt<T>(incr,_n,_X); } else { lasrt<T>(decr,_n,_X); } }; /// sort the vector template <typename T> inline void Vector<T>::sort(Vector<T>& out, const bool mode) const { out.copy(*this); out.sort(mode); }; template <typename T> inline void Vector<T>::sort2(Vector<INTM>& key, const bool mode) { quick_sort(key.rawX(),_X,(INTM)0,_n-1,mode); }; template <typename T> inline void Vector<T>::sort2(Vector<T>& out, Vector<INTM>& key, const bool mode) const { 
out.copy(*this); out.sort2(key,mode); } template <typename T> inline void Vector<T>::applyBayerPattern(const int offset) { INTM sizePatch=_n/3; INTM n = static_cast<INTM>(sqrt(static_cast<T>(sizePatch))); if (offset == 0) { // R for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 1 : 2; const INTM off = (i % 2) ? 0 : 1; for (INTM j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (INTM i = 0; i<n; ++i) { const INTM step = 2; const INTM off = (i % 2) ? 1 : 0; for (INTM j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 2 : 1; const INTM off = 0; for (INTM j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } else if (offset == 1) { // R for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 2 : 1; const INTM off = (i % 2) ? 1 : 0; for (INTM j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (INTM i = 0; i<n; ++i) { const INTM step = 2; const INTM off = (i % 2) ? 0 : 1; for (INTM j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 1 : 2; const INTM off = 0; for (INTM j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } else if (offset == 2) { // R for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 1 : 2; const INTM off = 0; for (INTM j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (INTM i = 0; i<n; ++i) { const INTM step = 2; const INTM off = (i % 2) ? 0 : 1; for (INTM j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 2 : 1; const INTM off = (i % 2) ? 1 : 0; for (INTM j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } else if (offset == 3) { // R for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 2 : 1; const INTM off = 0; for (INTM j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (INTM i = 0; i<n; ++i) { const INTM step = 2; const INTM off = (i % 2) ? 
1 : 0; for (INTM j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (INTM i = 0; i<n; ++i) { const INTM step = (i % 2) ? 1 : 2; const INTM off = (i % 2) ? 0 : 1; for (INTM j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } }; /// make a sparse copy template <typename T> inline void Vector<T>::toSparse( SpVector<T>& vec) const { INTM L=0; T* v = vec._v; INTM* r = vec._r; for (INTM i = 0; i<_n; ++i) { if (_X[i] != T()) { v[L]=_X[i]; r[L++]=i; } } vec._L=L; }; template <typename T> inline void Vector<T>::copyMask(Vector<T>& out, Vector<bool>& mask) const { out.resize(_n); INTM pointer=0; for (INTM i = 0; i<_n; ++i) { if (mask[i]) out[pointer++]=_X[i]; } out.setn(pointer); }; template <typename T> inline void Matrix<T>::copyMask(Matrix<T>& out, Vector<bool>& mask) const { out.resize(_m,_n); INTM count=0; for (INTM i = 0; i<mask.n(); ++i) if (mask[i]) ++count; out.setm(count); for (INTM i = 0; i<_n; ++i) { INTM pointer=0; for (INTM j = 0; j<_m; ++j) { if (mask[j]) { out[i*count+pointer]=_X[i*_m+j]; ++pointer; } } } }; /* **************************** * Implementation of SpMatrix * ****************************/ /// Constructor, CSC format, existing data template <typename T, typename I> SpMatrix<T,I>::SpMatrix(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax) : _externAlloc(true), _v(v), _r(r), _pB(pB), _pE(pE), _m(m), _n(n), _nzmax(nzmax) { }; /// Constructor, new m x n matrix, with at most nzmax non-zeros values template <typename T, typename I> SpMatrix<T,I>::SpMatrix(I m, I n, I nzmax) : _externAlloc(false), _m(m), _n(n), _nzmax(nzmax) { #pragma omp critical { _v=new T[nzmax]; _r=new I[nzmax]; _pB=new I[_n+1]; } _pE=_pB+1; }; /// Empty constructor template <typename T, typename I> SpMatrix<T,I>::SpMatrix() : _externAlloc(true), _v(NULL), _r(NULL), _pB(NULL), _pE(NULL), _m(0),_n(0),_nzmax(0) { }; template <typename T, typename I> inline void SpMatrix<T,I>::copy(const SpMatrix<T,I>& mat) { this->resize(mat._m,mat._n,mat._nzmax); 
// (tail of SpMatrix<T,I>::copy, whose head is in the previous chunk:
// copies the raw value / row-index / column-pointer arrays.)
memcpy(_v,mat._v,_nzmax*sizeof(T));
memcpy(_r,mat._r,_nzmax*sizeof(I));
memcpy(_pB,mat._pB,(_n+1)*sizeof(I));
}

/// Destructor: releases the CSC arrays via clear() unless externally owned.
template <typename T, typename I> SpMatrix<T,I>::~SpMatrix() {
   clear();
};

/// reference column i into vec (no copy: vec aliases this matrix's storage
/// and is marked externally allocated, so it must not outlive the matrix).
template <typename T, typename I>
inline void SpMatrix<T,I>::refCol(I i, SpVector<T,I>& vec) const {
   if (vec._nzmax > 0) vec.clear();
   vec._v=_v+_pB[i];
   vec._r=_r+_pB[i];
   vec._externAlloc=true;
   vec._L=_pE[i]-_pB[i];
   vec._nzmax=vec._L;
};

/// print the sparse matrix (dimensions, then every stored (row,col)=value
/// triplet) to cerr
template<typename T, typename I>
inline void SpMatrix<T,I>::print(const string& name) const {
   cerr << name << endl;
   cerr << _m << " x " << _n << " , " << _nzmax << endl;
   for (I i = 0; i<_n; ++i) {
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         cerr << "(" <<_r[j] << "," << i << ") = " << _v[j] << endl;
      }
   }
};

/// random access by linear column-major index; returns 0 for entries that
/// are outside the stored pattern. Costs O(nnz of the column) per call.
template<typename T, typename I>
inline T SpMatrix<T,I>::operator[](const I index) const {
   const I num_col=(index/_m);
   const I num_row=index -num_col*_m;
   T val = 0;
   for (I j = _pB[num_col]; j<_pB[num_col+1]; ++j) {
      if (_r[j]==num_row) {
         val=_v[j];
         break;
      }
   }
   return val;
};

/// densify column `index` into data (resized to _m and zero-filled first).
template<typename T, typename I>
void SpMatrix<T,I>::getData(Vector<T>& data, const I index) const {
   data.resize(_m);
   data.setZeros();
   for (I i = _pB[index]; i< _pB[index+1]; ++i)
      data[_r[i]]=_v[i];
};

/// adopt externally owned CSC arrays; previous storage is released and the
/// matrix will never free the new pointers (_externAlloc = true).
template <typename T, typename I>
void SpMatrix<T,I>::setData(T* v, I* r, I* pB, I* pE, I m, I n, I nzmax) {
   this->clear();
   _externAlloc =true;
   _v = v;
   _r=r;
   _pB=pB;
   _pE=pE;
   _m=m;
   _n=n;
   _nzmax=nzmax;
}

/// compute the sum of the magnitudes of the stored entries (l1 norm)
template <typename T, typename I>
inline T SpMatrix<T,I>::asum() const {
   return cblas_asum<T>(_pB[_n],_v,1);
};

/// compute the squared Frobenius norm (sum of squares of stored entries)
template <typename T, typename I>
inline T SpMatrix<T,I>::normFsq() const {
   return cblas_dot<T>(_pB[_n],_v,1,_v,1);
};

/// this += a*mat on the raw value arrays only; presumably both matrices
/// share the same sparsity pattern and nzmax -- TODO confirm with callers
template <typename T, typename I>
inline void SpMatrix<T,I>::add_direct(const SpMatrix<T,I>& mat, const T a) {
   Vector<T> v2(mat._v,mat._nzmax);
   Vector<T> v1(_v,_nzmax);
   v1.add(v2,a);
}

// (head of copy_direct; its body continues in the next chunk)
template <typename
T, typename I> inline void SpMatrix<T,I>::copy_direct(const SpMatrix<T,I>& mat) { Vector<T> v2(mat._v,_pB[_n]); Vector<T> v1(_v,_pB[_n]); v1.copy(v2); } template <typename T, typename I> inline T SpMatrix<T,I>::dot_direct(const SpMatrix<T,I>& mat) const { Vector<T> v2(mat._v,_pB[_n]); Vector<T> v1(_v,_pB[_n]); return v1.dot(v2); } /// clear the matrix template <typename T, typename I> inline void SpMatrix<T,I>::clear() { if (!_externAlloc) { delete[](_r); delete[](_v); delete[](_pB); } _n=0; _m=0; _nzmax=0; _v=NULL; _r=NULL; _pB=NULL; _pE=NULL; _externAlloc=true; }; /// resize the matrix template <typename T, typename I> inline void SpMatrix<T,I>::resize(const I m, const I n, const I nzmax) { if (n == _n && m == _m && nzmax == _nzmax) return; this->clear(); _n=n; _m=m; _nzmax=nzmax; _externAlloc=false; #pragma omp critical { _v = new T[nzmax]; _r = new I[nzmax]; _pB = new I[_n+1]; } _pE = _pB+1; for (I i = 0; i<=_n; ++i) _pB[i]=0; }; /// resize the matrix template <typename T, typename I> inline void SpMatrix<T,I>::scal(const T a) const { cblas_scal<T>(_pB[_n],a,_v,1); }; ///// resize the matrix template <typename T, typename I> inline T SpMatrix<T,I>::abs_mean() const { Vector<T> vec(_v,_pB[_n]); return vec.abs_mean(); }; /// y <- A'*x template <typename T, typename I> inline void SpMatrix<T,I>::multTrans(const Vector<T>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_n); if (beta) { y.scal(beta); } else { y.setZeros(); } const T* prX = x.rawX(); #pragma omp parallel for for (I i = 0; i<_n; ++i) { T sum=T(); for (I j = _pB[i]; j<_pE[i]; ++j) { sum+=_v[j]*prX[_r[j]]; } y[i] += alpha*sum; } }; /// perform b = alpha*A*x + beta*b, when x is sparse template <typename T, typename I> inline void SpMatrix<T,I>::multTrans(const SpVector<T,I>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_n); if (beta) { y.scal(beta); } else { y.setZeros(); } T* prY = y.rawX(); SpVector<T,I> col; for (I i = 0; i<_n; ++i) { this->refCol(i,col); prY[i] += 
alpha*x.dot(col); } }; /// y <- A*x template <typename T, typename I> inline void SpMatrix<T,I>::mult(const Vector<T>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_m); if (beta) { y.scal(beta); } else { y.setZeros(); } const T* prX = x.rawX(); for (I i = 0; i<_n; ++i) { T sca=alpha* prX[i]; for (I j = _pB[i]; j<_pE[i]; ++j) { y[_r[j]] += sca*_v[j]; } } }; /// perform b = alpha*A*x + beta*b, when x is sparse template <typename T, typename I> inline void SpMatrix<T,I>::mult(const SpVector<T,I>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_m); if (beta) { y.scal(beta); } else { y.setZeros(); } T* prY = y.rawX(); for (I i = 0; i<x.L(); ++i) { I ind=x.r(i); T val = alpha * x.v(i); for (I j = _pB[ind]; j<_pE[ind]; ++j) { prY[_r[j]] += val *_v[j]; } } }; /// perform C = a*A*B + b*C, possibly transposing A or B. template <typename T, typename I> inline void SpMatrix<T,I>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { if (transA) { if (transB) { C.resize(_n,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } SpVector<T,I> tmp; Vector<T> row(B.m()); for (I i = 0; i<_n; ++i) { this->refCol(i,tmp); B.mult(tmp,row); C.addRow(i,row,a); } } else { C.resize(_n,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } SpVector<T,I> tmp; Vector<T> row(B.n()); for (I i = 0; i<_n; ++i) { this->refCol(i,tmp); B.multTrans(tmp,row); C.addRow(i,row,a); } } } else { if (transB) { C.resize(_m,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> row(B.n()); Vector<T> col; for (I i = 0; i<B.m(); ++i) { B.copyRow(i,row); C.refCol(i,col); this->mult(row,col,a,T(1.0)); } } else { C.resize(_m,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colB; Vector<T> colC; for (I i = 0; i<B.n(); ++i) { B.refCol(i,colB); C.refCol(i,colC); this->mult(colB,colC,a,T(1.0)); } } } }; /// perform C = a*A*B + b*C, possibly transposing A or B. 
/// perform C = a*op(A)*op(B) + b*C, where A is this sparse matrix, B is
/// sparse, and op() optionally transposes (transA for A, transB for B).
/// C is dense and is resized to the proper shape in every branch.
/// NOTE(review): the (no transA, no transB) branch now passes an explicit
/// beta = 1 to the sparse column accumulation so the pre-scaled b*C content
/// is added to instead of overwritten; this matches the dense-B overload,
/// which calls this->mult(row,col,a,T(1.0)) for exactly this reason.
template <typename T, typename I>
inline void SpMatrix<T,I>::mult(const SpMatrix<T,I>& B, Matrix<T>& C,
      const bool transA, const bool transB, const T a, const T b) const {
   if (transA) {
      if (transB) {
         // C = a*A'*B' + b*C : row i of C receives a * (B * A(:,i))'
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> tmp;
         Vector<T> row(B.m());
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.mult(tmp,row);
            C.addRow(i,row,a);
         }
      } else {
         // C = a*A'*B + b*C : row i of C receives a * (B' * A(:,i))'
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> tmp;
         Vector<T> row(B.n());
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.multTrans(tmp,row);
            C.addRow(i,row,a);
         }
      }
   } else {
      if (transB) {
         // C = a*A*B' + b*C : accumulated as sparse rank-1 updates
         // A(:,i)*B(:,i)'; requires A and B to have the same column count.
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> colB;
         SpVector<T,I> colA;
         for (I i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.refCol(i,colB);
            C.rank1Update(colA,colB,a);
         }
      } else {
         // C = a*A*B + b*C : column i of C accumulates a * A * B(:,i).
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T,I> colB;
         Vector<T> colC;
         for (I i = 0; i<B.n(); ++i) {
            B.refCol(i,colB);
            C.refCol(i,colC);
            // bug fix: pass beta = 1 explicitly so the b*C term survives;
            // the header's default beta for this overload is presumably 0
            // (the dense-B overload also passes T(1.0)) -- TODO confirm.
            this->mult(colB,colC,a,T(1.0));
         }
      }
   }
};
/// perform C = a*B*A + b*C, possibly transposing A or B.
/// perform C = a*op(B)*op(A) + b*C by delegating to Matrix::mult with the
/// operand roles (and therefore the transpose flags) swapped.
template <typename T, typename I>
inline void SpMatrix<T,I>::multSwitch(const Matrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB, const T a, const T b) const {
   B.mult(*this,C,transB,transA,a,b);
};

/// Frobenius inner product <A,x> = sum_ij A(i,j)*x(i,j) where A is this
/// sparse matrix; only stored non-zeros contribute.
template <typename T, typename I>
inline T SpMatrix<T,I>::dot(const Matrix<T>& x) const {
   T sum=0;
   for (I i = 0; i<_n; ++i)
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         // bug fix: the dense entry matching non-zero j of column i is
         // x(_r[j],i); the previous code indexed x(_r[j],j), i.e. used the
         // running non-zero index as a column index, which is wrong and can
         // read past x's columns when nnz exceeds the column count.
         sum+=_v[j]*x(_r[j],i);
      }
   return sum;
};

/// extract row `ind` of the sparse matrix into the dense vector x.
template <typename T, typename I>
inline void SpMatrix<T,I>::copyRow(const I ind, Vector<T>& x) const {
   x.resize(_n);
   x.setZeros();
   for (I i = 0; i<_n; ++i) {
      for (I j = _pB[i]; j<_pE[i]; ++j) {
         if (_r[j]==ind) {
            x[i]=_v[j];
         } else if (_r[j] > ind) {
            // row indices are sorted within a column: stop early.
            break;
         }
      }
   }
};

/// add a*vec to every column, restricted to the stored non-zero pattern
/// (entry (r,c) receives a*vec[r]).
template <typename T, typename I>
inline void SpMatrix<T,I>::addVecToCols(const Vector<T>& vec, const T a) {
   const T* pr_vec = vec.rawX();
   if (isEqual(a,T(1.0))) {
      // fast path avoids the multiply when a == 1
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += pr_vec[_r[j]];
   } else {
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += a*pr_vec[_r[j]];
   }
};

/// like addVecToCols, but each column's k-th stored entry is additionally
/// weighted by weights[k] (k = position within the column).
template <typename T, typename I>
inline void SpMatrix<T,I>::addVecToColsWeighted(const Vector<T>& vec,
      const T* weights, const T a) {
   const T* pr_vec = vec.rawX();
   if (isEqual(a,T(1.0))) {
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += pr_vec[_r[j]]*weights[j-_pB[i]];
   } else {
      for (I i = 0; i<_n; ++i)
         for (I j = _pB[i]; j<_pE[i]; ++j)
            _v[j] += a*pr_vec[_r[j]]*weights[j-_pB[i]];
   }
};

/// sum <- sum of all columns (a dense _m-vector)
template <typename T, typename I>
inline void SpMatrix<T,I>::sum_cols(Vector<T>& sum) const {
   sum.resize(_m);
   sum.setZeros();
   SpVector<T,I> tmp;
   for (I i = 0; i<_n; ++i) {
      this->refCol(i,tmp);
      sum.add(tmp);
   }
};

/// aat <- A*A'
template <typename T, typename I>
inline void SpMatrix<T,I>::AAt(Matrix<T>& aat) const {
   I i,j,k;
   I K=_m;
   I M=_n;
   /* compute alpha alpha^T */
   aat.resize(K,K);
   int NUM_THREADS=init_omp(MAX_THREADS);
   // per-thread private K x K accumulators, reduced after the parallel loop
   T* aatT=new T[NUM_THREADS*K*K];
   for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
   for (i = 0; i<M;
++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=aatT+numT*K*K; for (j = _pB[i]; j<_pE[i]; ++j) { for (k = _pB[i]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); } template <typename T, typename I> inline void SpMatrix<T,I>::XtX(Matrix<T>& XtX) const { XtX.resize(_n,_n); XtX.setZeros(); SpVector<T,I> col; Vector<T> col_out; for (I i = 0; i<_n; ++i) { this->refCol(i,col); XtX.refCol(i,col_out); this->multTrans(col,col_out); } }; /// aat <- A(:,indices)*A(:,indices)' template <typename T, typename I> inline void SpMatrix<T,I>::AAt(Matrix<T>& aat, const Vector<I>& indices) const { I i,j,k; I K=_m; I M=indices.n(); /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T(); #pragma omp parallel for private(i,j,k) for (i = 0; i<M; ++i) { I ii = indices[i]; #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=aatT+numT*K*K; for (j = _pB[ii]; j<_pE[ii]; ++j) { for (k = _pB[ii]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); } /// aat <- sum_i w_i A(:,i)*A(:,i)' template <typename T, typename I> inline void SpMatrix<T,I>::wAAt(const Vector<T>& w, Matrix<T>& aat) const { I i,j,k; I K=_m; I M=_n; /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T(); #pragma omp parallel for private(i,j,k) for (i = 0; i<M; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=aatT+numT*K*K; for (j = _pB[i]; j<_pE[i]; ++j) { for (k = _pB[i]; k<=j; 
++k) { write_area[_r[j]*K+_r[k]]+=w._X[i]*_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); } /// XAt <- X*A' template <typename T, typename I> inline void SpMatrix<T,I>::XAt(const Matrix<T>& X, Matrix<T>& XAt) const { I j,i; I n=X._m; I K=_m; I M=_n; XAt.resize(n,K); /* compute X alpha^T */ // int NUM_THREADS=init_omp(MAX_THREADS); //T* XatT=new T[NUM_THREADS*n*K]; //for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T(); //#pragma omp parallel for private(i,j) for (i = 0; i<M; ++i) { //#ifdef _OPENMP // int numT=omp_get_thread_num(); //#else // int numT=0; //#endif // T* write_area=XatT+numT*n*K; for (j = _pB[i]; j<_pE[i]; ++j) { cblas_axpy<T>(n,_v[j],X._X+i*n,1,XAt._X+_r[j]*n,1); } } // cblas_copy<T>(n*K,XatT,1,XAt._X,1); // for (i = 1; i<NUM_THREADS; ++i) // cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); // delete[](XatT); }; /// XAt <- X(:,indices)*A(:,indices)' template <typename T, typename I> inline void SpMatrix<T,I>::XAt(const Matrix<T>& X, Matrix<T>& XAt, const Vector<I>& indices) const { I j,i; I n=X._m; I K=_m; I M=indices.n(); XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(MAX_THREADS); T* XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T(); #pragma omp parallel for private(i,j) for (i = 0; i<M; ++i) { I ii = indices[i]; #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=XatT+numT*n*K; for (j = _pB[ii]; j<_pE[ii]; ++j) { cblas_axpy<T>(n,_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); }; /// XAt <- sum_i w_i X(:,i)*A(:,i)' template <typename T, typename I> inline void SpMatrix<T,I>::wXAt(const Vector<T>& w, const Matrix<T>& X, Matrix<T>& XAt, const int numThreads) const { I j,l,i; I n=X._m; I K=_m; I M=_n; I Mx = X._n; I numRepX= M/Mx; 
assert(numRepX*Mx == M); XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(numThreads); T* XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T(); #pragma omp parallel for private(i,j,l) for (i = 0; i<Mx; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T * write_area=XatT+numT*n*K; for (l = 0; l<numRepX; ++l) { I ind=numRepX*i+l; if (w._X[ind] != 0) for (j = _pB[ind]; j<_pE[ind]; ++j) { cblas_axpy<T>(n,w._X[ind]*_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); }; /// copy the sparse matrix into a dense matrix template<typename T, typename I> inline void SpMatrix<T,I>::toFull(Matrix<T>& matrix) const { matrix.resize(_m,_n); matrix.setZeros(); T* out = matrix._X; for (I i=0; i<_n; ++i) { for (I j = _pB[i]; j<_pE[i]; ++j) { out[i*_m+_r[j]]=_v[j]; } } }; /// copy the sparse matrix into a full dense matrix template <typename T, typename I> inline void SpMatrix<T,I>::toFullTrans( Matrix<T>& matrix) const { matrix.resize(_n,_m); matrix.setZeros(); T* out = matrix._X; for (I i=0; i<_n; ++i) { for (I j = _pB[i]; j<_pE[i]; ++j) { out[i+_r[j]*_n]=_v[j]; } } }; /// use the data from v, r for _v, _r template <typename T, typename I> inline void SpMatrix<T,I>::convert(const Matrix<T>&vM, const Matrix<I>& rM, const I K) { const I M = rM.n(); const I L = rM.m(); const I* r = rM.X(); const T* v = vM.X(); I count=0; for (I i = 0; i<M*L; ++i) if (r[i] != -1) ++count; resize(K,M,count); count=0; for (I i = 0; i<M; ++i) { _pB[i]=count; for (I j = 0; j<L; ++j) { if (r[i*L+j] == -1) break; _v[count]=v[i*L+j]; _r[count++]=r[i*L+j]; } _pE[i]=count; } for (I i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1); }; /// use the data from v, r for _v, _r template <typename T, typename I> inline void SpMatrix<T,I>::convert2( const Matrix<T>&vM, const Vector<I>& rv, const I K) { const I M = vM.n(); const I L = 
vM.m(); I* r = rv.rawX(); const T* v = vM.X(); I LL=0; for (I i = 0; i<L; ++i) if (r[i] != -1) ++LL; this->resize(K,M,LL*M); I count=0; for (I i = 0; i<M; ++i) { _pB[i]=count; for (I j = 0; j<LL; ++j) { _v[count]=v[i*L+j]; _r[count++]=r[j]; } _pE[i]=count; } for (I i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1); }; /// returns the l2 norms ^2 of the columns template <typename T, typename I> inline void SpMatrix<T,I>::normalize() { SpVector<T,I> col; for (I i = 0; i<_n; ++i) { this->refCol(i,col); const T norm = col.nrm2sq(); if (norm > 1e-10) col.scal(T(1.0)/col.nrm2sq()); } }; /// returns the l2 norms ^2 of the columns template <typename T, typename I> inline void SpMatrix<T,I>::normalize_rows() { Vector<T> norms(_m); norms.setZeros(); for (I i = 0; i<_n; ++i) { for (I j = _pB[i]; j<_pE[i]; ++j) { norms[_r[j]] += _v[j]*_v[j]; } } norms.Sqrt(); for (I i = 0; i<_m; ++i) norms[i] = norms[i] < 1e-10 ? T(1.0) : T(1.0)/norms[i]; for (I i = 0; i<_n; ++i) for (I j = _pB[i]; j<_pE[i]; ++j) _v[j] *= norms[_r[j]]; }; /// returns the l2 norms ^2 of the columns template <typename T, typename I> inline void SpMatrix<T,I>::norm_2sq_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T,I> col; for (I i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] = col.nrm2sq(); } }; template <typename T, typename I> inline void SpMatrix<T,I>::norm_0_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T,I> col; for (I i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] = static_cast<T>(col.length()); } }; template <typename T, typename I> inline void SpMatrix<T,I>::norm_1_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T,I> col; for (I i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] =col.asum(); } }; /* *************************** * Implementation of SpVector * ***************************/ /// Constructor, of the sparse vector of size L. 
/// Constructor: wrap existing arrays v (values) and r (indices) of logical
/// length L; the vector does not take ownership (_externAlloc = true).
template <typename T, typename I>
SpVector<T,I>::SpVector(T* v, I* r, I L, I nzmax) :
   _externAlloc(true), _v(v), _r(r), _L(L), _nzmax(nzmax) { };

/// Constructor, allocates nzmax slots
template <typename T, typename I>
SpVector<T,I>::SpVector(I nzmax) :
   _externAlloc(false), _L(0), _nzmax(nzmax) {
// allocation serialized -- presumably to work around a non-thread-safe
// allocator in the surrounding project; TODO confirm it is still needed
#pragma omp critical
   {
      _v = new T[nzmax];
      _r = new I[nzmax];
   }
};

/// Empty constructor
template <typename T, typename I>
SpVector<T,I>::SpVector() :
   _externAlloc(true), _v(NULL), _r(NULL), _L(0), _nzmax(0) { };

/// Destructor
template <typename T, typename I>
SpVector<T,I>::~SpVector() {
   clear();
};

/// computes the sum of the magnitude of the elements (l1 norm)
template <typename T, typename I>
inline T SpVector<T,I>::asum() const {
   return cblas_asum<T>(_L,_v,1);
};

/// computes the l2 norm ^2 of the vector
template <typename T, typename I>
inline T SpVector<T,I>::nrm2sq() const {
   return cblas_dot<T>(_L,_v,1,_v,1);
};

/// computes the l2 norm of the vector
template <typename T, typename I>
inline T SpVector<T,I>::nrm2() const {
   return cblas_nrm2<T>(_L,_v,1);
};

/// returns the result of Vector<T>::fmaxval over the stored values
/// (the original comment mislabeled this as an l2 norm)
template <typename T, typename I>
inline T SpVector<T,I>::fmaxval() const {
   Vector<T> tmp(_v,_L);
   return tmp.fmaxval();
};

/// print the vector (capacity, then every (index, value) pair) to std::cerr
template <typename T, typename I>
inline void SpVector<T,I>::print(const string& name) const {
   std::cerr << name << std::endl;
   std::cerr << _nzmax << std::endl;
   for (I i = 0; i<_L; ++i)
      cerr << "(" <<_r[i] << ", " << _v[i] << ")" << endl;
};

/// create a reference on the index array _r (no copy; indices aliases it)
template <typename T, typename I>
inline void SpVector<T,I>::refIndices(
      Vector<I>& indices) const {
   indices.setPointer(_r,_L);
};

/// copy the indices into an int vector; only the logical length is set, so
/// indices presumably must already have capacity >= _L -- TODO confirm
template <typename T, typename I>
inline void SpVector<T,I>::getIndices(Vector<int>& indices) const {
//   indices.resize(_L);
   indices.setn(_L);
   for (int ii=0; ii<_L; ++ii)
      indices[ii]=_r[ii];
};

/// creates a reference on the value array _v (no copy; val aliases it)
template <typename T, typename I>
inline void SpVector<T,I>::refVal(
      Vector<T>& val) const {
   val.setPointer(_v,_L);
};

/// a <- a.^2 (element-wise square of the stored values)
template <typename T, typename I>
inline void SpVector<T,I>::sqr() {
   vSqr<T>(_L,_v,_v);
};

/// scale the stored values by a
template <typename T, typename I>
inline void SpVector<T,I>::scal(const T a) {
   cblas_scal<T>(_L,a,_v,1);
};

/// dot product with another sparse vector, computed by a sorted merge over
/// the two index lists (both index arrays presumed sorted increasingly --
/// TODO confirm; construction paths in this file sort them).
template <typename T, typename I>
inline T SpVector<T,I>::dot(const SpVector<T,I>& vec) const {
   T sum=T();
   I countI = 0;
   I countJ = 0;
   while (countI < _L && countJ < vec._L) {
      const I rI = _r[countI];
      const I rJ = vec._r[countJ];
      if (rI > rJ) {
         ++countJ;
      } else if (rJ > rI) {
         ++countI;
      } else {
         // matching index: accumulate the product and advance both cursors
         sum+=_v[countI]*vec._v[countJ];
         ++countI;
         ++countJ;
      }
   }
   return sum;
};

/// dot product with a dense vector: sum over the stored non-zeros only
template <typename T, typename I>
inline T SpVector<T,I>::dot(const Vector<T>& vec) const {
   //return cblas_doti(_L,_v,_r,vec.rawX());
   T sum=T();
   for (int countI=0; countI < _L; ++countI)
      sum+=_v[countI]*vec[_r[countI]];
   return sum;
};

/// clears the vector (frees storage unless it is externally owned)
template <typename T, typename I>
inline void SpVector<T,I>::clear() {
   if (!_externAlloc) {
      delete[](_v);
      delete[](_r);
   }
   _v=NULL;
   _r=NULL;
   _L=0;
   _nzmax=0;
   _externAlloc=true;
};

/// resizes the vector; when the capacity changes the old contents are
/// discarded and the logical length is reset to 0
template <typename T, typename I>
inline void SpVector<T,I>::resize(const I nzmax) {
   if (_nzmax != nzmax) {
      clear();
      _nzmax=nzmax;
      _L=0;
      _externAlloc=false;
// allocation serialized, matching the allocating constructor above
#pragma omp critical
      {
         _v=new T[nzmax];
         _r=new I[nzmax];
      }
   }
};

/// reinterpret the column-major linear indices as an m x n sparse matrix
/// in CSC form (indices presumed sorted increasingly -- TODO confirm)
template <typename T, typename I>
void inline SpVector<T,I>::toSpMatrix(
      SpMatrix<T,I>& out, const I m, const I n) const {
   out.resize(m,n,_L);
   cblas_copy<T>(_L,_v,1,out._v,1);
   I current_col=0;
   I* out_r=out._r;
   I* out_pB=out._pB;
   out_pB[0]=current_col;
   for (I i = 0; i<_L; ++i) {
      I col=_r[i]/m;
      if (col > current_col) {
         // close the current column, then revisit entry i (the i-- undoes
         // the loop increment) until its column has been reached
         out_pB[current_col+1]=i;
         current_col++;
         i--;
      } else {
         // convert the linear index to a row index within the column
         out_r[i]=_r[i]-col*m;
      }
   }
   // remaining (empty trailing) column pointers all point to _L
   for (current_col++ ; current_col < n+1; ++current_col)
      out_pB[current_col]=_L;
};

/// densify into out; out is only zero-filled, not resized, so it
/// presumably must already have the target size -- TODO confirm callers
template <typename T, typename I>
void inline SpVector<T,I>::toFull(Vector<T>& out) const {
   out.setZeros();
   T* X = out.rawX();
   for (I i = 0; i<_L; ++i)
      X[_r[i]]=_v[i];
};

#endif
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE(review): this helper MUTATES *y while normalizing the carry — callers
 * must not reuse y afterwards expecting its original value.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Driver: allocates the two time planes A[0..1] and the 7 coefficient grids,
 * runs the PLUTO-tiled stencil TESTS times, and reports the best wall time.
 * Expects argv = {prog, Nx, Ny, Nz, Nt}. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* NOTE(review): if argc <= 3 (or <= 4 for Nt) these stay UNINITIALIZED and
   * the allocations/loops below read garbage sizes — the program assumes all
   * four arguments are always supplied; consider erroring out otherwise. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A[m][z][y][x]: two time planes (ping-pong on t%2)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
   A[m] = (double ***) malloc(sizeof(double**)*Nz);
   for(i=0; i<Nz; i++){
    A[m][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
     A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
   }
  }
  // coef[c][z][y][x]: one variable coefficient grid per stencil point (7)
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
   coef[m] = (double ***) malloc(sizeof(double**)*Nz);
   for(i=0; i<Nz; i++){
    coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
     coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
   }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // NOTE(review): tile_size is consumed by the code generator, not at runtime;
  // it is never freed in this function.
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  /* NOTE(review): loops start at 1, so the i==0 / j==0 / k==0 boundary planes
   * of A[0] and coef[*] are never initialized here — the stencil below only
   * reads indices >= 0 via the -1 neighbors of interior points, so confirm
   * those planes are either unused or deliberately left as-is. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */

  /* Auto-generated time-tiled sweep (PLUTO/CLooG output). t5 is logical time;
   * t6/t7/t8 are skewed z/y/x coordinates, de-skewed as (-t5+t6) etc. in the
   * body. Do not hand-edit the loop bounds: they encode the tiling legality. */
  int t1, t2, t3, t4, t5, t6, t7, t8;

 int lb, ub, lbp, ubp, lb2, ub2;
 register int lbv, ubv;

/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
    /* Parallelism is over t2 (a tile-space wavefront dimension). */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) {
        for (t4=max(max(max(0,ceild(t1-511,512)),ceild(8*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(4*t1+Nx+5,2048)),floord(8*t2+Nx+4,2048)),floord(8*t3+Nx+4,2048)),floord(8*t1-8*t2+Nz+Nx+3,2048));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),2048*t4+2046),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                lbv=max(2048*t4,t5+1);
                ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point variable-coefficient update, writing the (t5+1)%2
                   * plane from the t5%2 plane. */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level pointers A, coef and the tile_size buffer are
  // never freed (only the inner levels are) — harmless at exit, but a leak if
  // this code is ever embedded elsewhere.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
mttkrp_omp.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include "hicoo.h"
#include <omp.h>

/* Chunk size used by every `schedule(dynamic, CHUNKSIZE)` loop below. */
#define CHUNKSIZE 1

/* Forward declarations of the OpenMP MTTKRP-on-HiCOO kernel variants.
 * Naming convention (composable suffixes):
 *   Kernels / Blocks / KernelsBlocks — which HiCOO level is parallelized
 *   _3D                             — specialization for 3rd-order tensors
 *   _MatrixTiling                   — sptRankMatrix (tiled) factor matrices
 *   _Scheduled[_Balanced]           — privatization-free kernel scheduling
 *   _Reduce[_Two]                   — per-thread copy_mats reduction buffers
 * tk = number of threads across kernels, tb = threads within a block. */

int sptOmpMTTKRPHiCOOKernels(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOBlocks(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tb);

int sptOmpMTTKRPHiCOOBlocks_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tb);

int sptOmpMTTKRPHiCOOKernelsBlocks(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb);

int sptOmpMTTKRPHiCOOKernelsBlocks_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb);

int sptOmpMTTKRPHiCOOKernels_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk);

int sptOmpMTTKRPHiCOOBlocks_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tb);

int sptOmpMTTKRPHiCOOBlocks_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tb);

int sptOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb);

int sptOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb);

/**
 * Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode
 * @param[out] mats[nmodes]    the result of MTTKRP, a dense matrix, with size
 * ndims[mode] * R
 * @param[in]  hitsr    the HiCOO sparse tensor input
 * @param[in]  mats    (N+1) dense matrices, with mats[nmodes] as temporary
 * @param[in]  mats_order    the order of the Khatri-Rao products
 * @param[in]  mode   the mode on which the MTTKRP is performed
 * @param[in]  scratch  an temporary array to store intermediate results, space assigned before this function
 *
 * This function uses support arbitrary-order sparse tensors with Khatri-Rao
 * products of dense factor matrices, the output is the updated dense matrix for the "mode".
 */
/* Top-level dispatcher: picks kernel-level, block-level, or combined
 * parallelism based on tk/tb. Returns 0 on success, -1 when asked for the
 * sequential case (which lives elsewhere).
 * NOTE(review): "sequetial" typo lives in a runtime message; fixing it would
 * change program output, so it is only flagged here. */
int sptOmpMTTKRPHiCOO(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    if(tk > 1 && tb == 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernels(hitsr, mats, mats_order, mode, tk) == 0);
    } else if(tk == 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOBlocks(hitsr, mats, mats_order, mode, tb) == 0);
    } else if(tk > 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks(hitsr, mats, mats_order, mode, tk, tb) == 0);
    } else if(tk == 1 && tb == 1) {
        printf("Should specify sequetial MTTKRP.\n");
        return -1;
    }

    return 0;
}

/* Dispatcher for the rank-matrix (tiled factor) variants; same tk/tb logic
 * as sptOmpMTTKRPHiCOO. */
int sptOmpMTTKRPHiCOO_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    if(tk > 1 && tb == 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0);
    } else if(tk == 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOBlocks_MatrixTiling(hitsr, mats, mats_order, mode, tb) == 0);
    } else if(tk > 1 && tb > 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling(hitsr, mats, mats_order, mode, tk, tb) == 0);
    } else if(tk == 1 && tb == 1) {
        printf("Should specify sequetial MTTKRP with -d -2.\n");
        return -1;
    }

    return 0;
}

/* Dispatcher for the scheduled (privatization-free) variants; `balanced`
 * selects the load-balanced kernel schedule. Block parallelism (tb > 1) is
 * not implemented for this path. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb,
    int balanced)
{
    if(tk > 1 && tb == 1) {
        if (balanced == 0)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        else if (balanced == 1)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0);
    } else {
        printf("Haven't support block parallelism.\n");
        return -1;
    }

    return 0;
}

/* Dispatcher for scheduled variants using per-thread copy_mats reduction. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb,
    int balanced)
{
    if(tk > 1 && tb == 1) {
        if(balanced == 0)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        else if (balanced == 1)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
    } else {
        printf("Haven't support block parallelism.\n");
        return -1;
    }

    return 0;
}

/* Dispatcher for the "Reduce_Two" scheduled-reduction variant. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce_Two(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    if(tk > 1 && tb == 1) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
    } else {
        printf("Haven't support block parallelism.\n");
        return -1;
    }

    return 0;
}

/* General-order MTTKRP, parallel over HiCOO kernels; concurrent writes to the
 * output rows are serialized with `omp atomic` per entry of the rank-R row. */
int sptOmpMTTKRPHiCOOKernels(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    /* 3rd-order tensors take the specialized path. */
    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const M = mats[nmodes];
    sptValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    // omp_lock_t lock;
    // omp_init_lock(&lock);

    /* Loop kernels */
#pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord));
        sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Block indices */
            for(sptIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices: global index = (block index << sb_bits) + in-block offset */
                for(sptIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];

                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptMatrix * times_mat = mats[times_mat_index];
                sptIndex tmp_i = ele_coord[times_mat_index];
                sptValue const entry = vals[z];
                for(sptIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(sptIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                sptIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(block_coord);
        free(ele_coord);
        sptFreeValueVector(&scratch);
    }   // End loop kernels
    // omp_destroy_lock(&lock);

    return 0;
}

/* 3rd-order specialization of sptOmpMTTKRPHiCOOKernels: the two non-mode
 * factor matrices are resolved once up front, no per-entry scratch needed.
 * NOTE(review): the b and z loops use sptIndex counters against sptNnzIndex
 * bounds (the general-order version uses sptNnzIndex) — potential narrowing
 * for very large tensors; confirm the intended index widths. */
int sptOmpMTTKRPHiCOOKernels_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
#pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            sptBlockIndex block_coord_mode = hitsr->binds[mode].data[b];
            sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];

                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}

/* General-order kernels-parallel MTTKRP over sptRankMatrix factors. Factor
 * rows are addressed relative to per-block base pointers (blocked_times_mat),
 * so in-block offsets fit in sptElementIndex; output rows still need atomics
 * because different kernels can touch the same mode block. */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Loop kernels */
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices: per-mode base pointers for this block */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            /* NOTE(review): z is sptIndex against sptNnzIndex bounds — see the
             * width note on the 3D kernel. */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    }   // End loop kernels

    return 0;
}

/* 3rd-order specialization with rank-matrix tiling; fuses the two factor
 * products and the atomic accumulation into one rank loop. */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Per-block base pointers into output and factor matrices */
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;

                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}

/* Scheduled (privatization-free) general-order kernel: kernels are grouped by
 * kschr[mode] so that, within one parallel iteration i, concurrent kernels
 * write disjoint slices of the output — hence the plain (non-atomic) simd
 * accumulation. NOTE(review): that disjointness is a property of the
 * partitioner that builds kschr, not visible here — verify there.
 * With NNZ_STATISTICS defined, per-thread nnz counts are gathered and a
 * load-balance summary is printed at the end. */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
    for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex k=0; k<num_kernel_dim; ++k) {
            int tid = omp_get_thread_num();

            /* Some rows of kernels have fewer scheduled entries than i. */
            if(i >= kschr_mode[k].len) continue;
            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Allocate thread-private data */
            sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            sptValueVector scratch; // Temporary array
            sptNewValueVector(&scratch, R, R);

            /* Loop blocks in a kernel */
            for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                /* Blocked matrices */
                for(sptIndex m=0; m<nmodes; ++m)
                    blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block */
                for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                    /* Multiply the 1st matrix */
                    sptIndex times_mat_index = mats_order[1];
                    sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                    sptValue const entry = vals[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                    /* Multiply the rest matrices */
                    for(sptIndex m=2; m<nmodes; ++m) {
                        times_mat_index = mats_order[m];
                        tmp_i = hitsr->einds[times_mat_index].data[z];
                        #pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                    }

                    sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                    /* No atomics: scheduling guarantees exclusive output rows. */
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                    }
                }   // End loop entries
            }   // End loop blocks

            /* Free thread-private space */
            free(blocked_times_mat);
            sptFreeValueVector(&scratch);
        }   // End loop kernels
    }   // End loop iterations

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    // printf("thread_nnzs:\n");
    for(int i = 0; i < tk; ++i) {
        // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]);
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    // printf("\n");
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}

/* Load-balanced variant of the scheduled kernel above: kernels are walked via
 * kschr_balanced / kschr_balanced_pos in nkpars[mode] partitions, so each
 * parallel iteration processes a balanced slice of kernels per output row.
 * (Continues past the end of this chunk.) */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop partitions */
    for(sptIndex p=0; p<npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex i=0; i<num_kernel_dim; ++i) {
            if(p >= kschr_balanced_pos_mode[i].len - 1) continue;
            int tid = omp_get_thread_num();

            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(sptIndex j = j_begin; j < j_end; ++j) {

                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Allocate thread-private data */
                sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
                sptValueVector scratch; // Temporary array
                sptNewValueVector(&scratch, R, R);

                /* Loop blocks in a kernel */
                for(sptNnzIndex
b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop inside a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = 
hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; 
// printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim); #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { /* Loop kernels */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k=0; k<num_kernel_dim; ++k) { int tid = omp_get_thread_num(); // printf("tid: %d, (i, k): (%u, %u)\n", tid, i, k); if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * 
stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] 
as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; sptIndex npars = hitsr->nkpars[mode]; // printf("nkiters: %u, num_kernel_dim: %u\n", hitsr->nkiters[mode], num_kernel_dim); #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop partitions */ for(sptIndex p=0; p<npars; ++p) { /* Loop kernels */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= 
kschr_balanced_pos_mode[i].len - 1) continue; int tid = omp_get_thread_num(); sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; /* Loop inside a partition */ for(sptIndex j = j_begin; j < j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; // printf("tid: %d, (i, j): (%u, %u), kernel_num: %u\n", tid, i, j, kernel_num); sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop inside a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k 
< hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // 
printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS 
#pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) continue; sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd 
for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. 
sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; sptIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex 
j_end = kschr_balanced_pos_mode[i].data[p+1]; for(sptIndex j=j_begin; j<j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End kernels 
in a partition } // End loop kernels } // End loop iterations /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; /* Use copy_mats to reduce atomics */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } 
sptElementIndex const mode_i = hitsr->einds[mode].data[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + 
(hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } #ifdef NNZ_STATISTICS /* Calculate load balance of kernels */ sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: 
%"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode]; sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode]; sptIndex npars = hitsr->nkpars[mode]; #ifdef NNZ_STATISTICS sptNnzIndex * thread_nnzs = 
(sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex)); memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex)); #endif /* Loop parallel iterations */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex p=0; p<npars; ++p) { int tid = omp_get_thread_num(); /* Loop kernels */ for(sptIndex i=0; i<num_kernel_dim; ++i) { if(p >= kschr_balanced_pos_mode[i].len - 1) continue; sptIndex j_begin = kschr_balanced_pos_mode[i].data[p]; sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1]; for(sptIndex j=j_begin; j<j_end; ++j) { sptIndex kernel_num = kschr_balanced_mode[i].data[j]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * 
blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End kernels in a partition } // End loop kernels } // End loop partitions /* Process using atomics */ #ifdef NNZ_STATISTICS #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs) #else #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) #endif for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) { int tid = omp_get_thread_num(); sptIndex kernel_num = hitsr->kschr_rest[mode].data[k]; sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num]; sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Use copy_mats to reduce atomics */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; #ifdef NNZ_STATISTICS thread_nnzs[tid] += (bptr_end - bptr_begin); #endif /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update bmvals_row[r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels /* Reduction */ #pragma omp parallel for 
schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } /* Calculate load balance of kernels */ #ifdef NNZ_STATISTICS sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0; double std_nnzs = 0.0; double avg_nnzs = hitsr->nnz / (double)tk; // printf("thread_nnzs:\n"); for(int i = 0; i < tk; ++i) { // printf("%"PARTI_PRI_NNZ_INDEX", ", thread_nnzs[i]); sum_nnzs += thread_nnzs[i]; if(min_nnzs > thread_nnzs[i]) min_nnzs = thread_nnzs[i]; if(max_nnzs < thread_nnzs[i]) max_nnzs = thread_nnzs[i]; std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs); } // printf("\n"); std_nnzs = sqrt(std_nnzs / tk); printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs); sptAssert(sum_nnzs == hitsr->nnz); free(thread_nnzs); #endif return 0; } int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Two( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two(hitsr, mats, copy_mats, mats_order, mode, tk) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; int tk2 = 2; /* Loop parallel iterations */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk/tk2) for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ // TODO: cannot compile using icc // #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk2) for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) continue; sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Loop blocks in a kernel */ for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = 
hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries } // End loop blocks /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk/tk2; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; } int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Two( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptRankMatrix * copy_mats[], // temporary matrices for reduction sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); for(int t=0; t<tk; ++t) { memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values))); } sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits); sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk; sptIndexVector * restrict kschr_mode = hitsr->kschr[mode]; int tk2 = 2; /* Loop parallel iterations */ #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk/tk2) for(sptIndex i=0; i<hitsr->nkiters[mode]; ++i) { int tid = omp_get_thread_num(); /* Loop kernels */ // Cannot compile using icc // #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk2) for(sptIndex k=0; k<num_kernel_dim; ++k) { if(i >= kschr_mode[k].len) { // printf("i: %u, k: %u\n", i, k); continue; } sptIndex kptr_loc = kschr_mode[k].data[i]; sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc]; sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1]; /* Loop blocks in a kernel */ for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* use copy_mats to store each thread's output */ sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) 
* stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels } // End loop iterations /* Reduction */ #pragma omp parallel for schedule(static) num_threads(tk) for(sptIndex i=0; i<ndims[mode]; ++i) { for(int t=0; t<tk/tk2; ++t) { #pragma omp simd for(sptElementIndex r=0; r<R; ++r) { mvals[i * stride + r] += copy_mats[t]->values[i * stride + r]; } } } return 0; } int sptOmpMTTKRPHiCOOBlocks( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOBlocks_3D(hitsr, mats, mats_order, mode, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); // omp_lock_t lock; // omp_init_lock(&lock); /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Element indices */ for(sptIndex m=0; m<nmodes; ++m) ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z]; /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex tmp_i = ele_coord[times_mat_index]; sptValue const entry = vals[z]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; times_mat = mats[times_mat_index]; tmp_i = ele_coord[times_mat_index]; for(sptIndex r=0; r<R; ++r) { 
scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } sptIndex const mode_i = ele_coord[mode]; // omp_set_lock(&lock); for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += scratch.data[r]; } // omp_unset_lock(&lock); } // End loop entries /* Free thread-private space */ free(block_coord); free(ele_coord); sptFreeValueVector(&scratch); } // End loop blocks } // End loop kernels // omp_destroy_lock(&lock); return 0; } int sptOmpMTTKRPHiCOOBlocks_3D( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { sptBlockIndex block_coord_mode = 
hitsr->binds[mode].data[b]; sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b]; sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z]; sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z]; sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptIndex r=0; r<R; ++r) { #pragma omp atomic update mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOBlocks_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOBlocks_3D_MatrixTiling(hitsr, mats, mats_order, mode, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Blocked matrices */ for(sptIndex m=0; m<nmodes; ++m) blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z]; sptValue const entry = vals[z]; for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; tmp_i = hitsr->einds[times_mat_index].data[z]; for(sptElementIndex r=0; r<R; ++r) { scratch.data[r] *= 
blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r]; } } sptElementIndex const mode_i = hitsr->einds[mode].data[z]; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r]; } } // End loop entries /* Free thread-private space */ free(blocked_times_mat); sptFreeValueVector(&scratch); } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOBlocks_3D_MatrixTiling( sptSparseTensorHiCOO const * const hitsr, sptRankMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tb) { sptIndex const nmodes = hitsr->nmodes; sptIndex const * const ndims = hitsr->ndims; sptValue const * const restrict vals = hitsr->values.data; sptElementIndex const stride = mats[0]->stride; /* Check the mats. */ sptAssert(nmodes ==3); for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptElementIndex const R = mats[mode]->ncols; sptRankMatrix * const restrict M = mats[nmodes]; sptValue * const restrict mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); sptIndex times_mat_index_1 = mats_order[1]; sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1]; sptIndex times_mat_index_2 = mats_order[2]; sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2]; /* Loop kernels */ for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data 
*/ sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride; sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { sptElementIndex mode_i = hitsr->einds[mode].data[z]; sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z]; sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z]; sptValue entry = vals[z]; for(sptElementIndex r=0; r<R; ++r) { #pragma omp atomic update blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry * blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] * blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r]; } } // End loop entries } // End loop blocks } // End loop kernels return 0; } int sptOmpMTTKRPHiCOOKernelsBlocks( sptSparseTensorHiCOO const * const hitsr, sptMatrix * mats[], // mats[nmodes] as temporary space. sptIndex const mats_order[], // Correspond to the mode order of X. sptIndex const mode, const int tk, const int tb) { omp_set_nested(1); omp_set_dynamic(0); sptIndex const nmodes = hitsr->nmodes; if(nmodes == 3) { sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks_3D(hitsr, mats, mats_order, mode, tk, tb) == 0); return 0; } sptIndex const * const ndims = hitsr->ndims; sptValue const * const vals = hitsr->values.data; sptIndex const stride = mats[0]->stride; /* Check the mats. 
*/ for(sptIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } sptIndex const tmpI = mats[mode]->nrows; sptIndex const R = mats[mode]->ncols; sptMatrix * const M = mats[nmodes]; sptValue * const mvals = M->values; memset(mvals, 0, tmpI*stride*sizeof(*mvals)); // omp_lock_t lock; // omp_init_lock(&lock); /* Loop kernels */ #pragma omp parallel for num_threads(tk) for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) { sptNnzIndex kptr_begin = hitsr->kptr.data[k]; sptNnzIndex kptr_end = hitsr->kptr.data[k+1]; /* Loop blocks in a kernel */ #pragma omp parallel for num_threads(tb) for(sptIndex b=kptr_begin; b<kptr_end; ++b) { /* Allocate thread-private data */ sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord)); sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord)); sptValueVector scratch; // Temporary array sptNewValueVector(&scratch, R, R); /* Block indices */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = hitsr->binds[m].data[b]; sptNnzIndex bptr_begin = hitsr->bptr.data[b]; sptNnzIndex bptr_end = hitsr->bptr.data[b+1]; /* Loop entries in a block */ for(sptIndex z=bptr_begin; z<bptr_end; ++z) { /* Element indices */ for(sptIndex m=0; m<nmodes; ++m) ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z]; /* Multiply the 1st matrix */ sptIndex times_mat_index = mats_order[1]; sptMatrix * times_mat = mats[times_mat_index]; sptIndex tmp_i = ele_coord[times_mat_index]; sptValue const entry = vals[z]; for(sptIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } /* Multiply the rest matrices */ for(sptIndex m=2; m<nmodes; ++m) { times_mat_index = mats_order[m]; times_mat = mats[times_mat_index]; tmp_i = ele_coord[times_mat_index]; 
for(sptIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                /* Accumulate the scratch row into the output factor row.
                   The omp atomic protects concurrent updates from different
                   blocks/threads that hit the same mode index. */
                sptIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            }   // End loop entries

            /* Free thread-private space */
            free(block_coord);
            free(ele_coord);
            sptFreeValueVector(&scratch);
        }   // End loop blocks
    }   // End loop kernels
    // omp_destroy_lock(&lock);

    return 0;
}


/**
 * Specialized OpenMP MTTKRP on a HiCOO tensor for the 3rd-order (nmodes == 3)
 * case, parallelized over kernels (tk threads) with nested parallelism over
 * blocks (tb threads).  The result is accumulated into mats[nmodes].
 *
 * NOTE(review): the block loop index `b` and entry loop index `z` are
 * declared sptIndex while their bounds (kptr/bptr data) are sptNnzIndex;
 * if sptNnzIndex is wider this may truncate for very large tensors — confirm.
 */
int sptOmpMTTKRPHiCOOKernelsBlocks_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    /* Nested parallel regions are required for the tb-thread block loop. */
    omp_set_nested(1);
    omp_set_dynamic(0);
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    /* Output is accumulated, so zero it first. */
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* The two non-target factor matrices, in the caller-provided mode order. */
    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            sptBlockIndex block_coord_mode = hitsr->binds[mode].data[b];
            sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Global indices = (block index << sb_bits) + element index. */
                sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];

                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += entry *
                        times_mat_1->values[tmp_i_1 * stride + r] *
                        times_mat_2->values[tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/**
 * General-order OpenMP MTTKRP on a HiCOO tensor with matrix tiling: each
 * block addresses its factor matrices through block-local ("blocked")
 * base pointers, so entry indices are the short sptElementIndex type.
 * Dispatches to the specialized 3-D kernel when nmodes == 3.
 */
int sptOmpMTTKRPHiCOOKernelsBlocks_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    sptIndex const nmodes = hitsr->nmodes;
    omp_set_nested(1);
    omp_set_dynamic(0);

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk, tb) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            sptValueVector scratch; // Temporary array
            sptNewValueVector(&scratch, R, R);

            /* Blocked matrices: base pointer of each factor for this block. */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block.
               NOTE(review): `z` is sptIndex but bptr values are sptNnzIndex;
               confirm no truncation for very large nnz counts. */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                }
            }   // End loop entries

            /* Free thread-private space */
            free(blocked_times_mat);
            sptFreeValueVector(&scratch);
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}


/**
 * 3rd-order (nmodes == 3) OpenMP MTTKRP on a HiCOO tensor with matrix
 * tiling: block-local base pointers for the target and the two non-target
 * factors, short element indices, atomic accumulation into the output.
 */
int sptOmpMTTKRPHiCOOKernelsBlocks_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    const int tb)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        #pragma omp parallel for num_threads(tb)
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Allocate thread-private data */
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];

                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                        blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    return 0;
}
homog.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
#include <mpi.h>
#include <hdf5.h>
#include "mceik_struct.h"
#include "mceik_broadcast.h"
#include "mpiutils.h"
#include "locate.h"
#include "h5io.h"

void freeStations(struct mceik_stations_struct *stations);
void freeCatalog(struct mceik_catalog_struct *catalog);
static void createHomogeneousModel(const int nx, const int ny, const int nz,
                                   const double vel_ms,
                                   int *__restrict__ vmod);
int computeHomogeneousTraveltimes(
    const int nx, const int ny, const int nz,
    double x0, double y0, double z0,
    const double dx, double dy, const double dz,
    const double xs, const double ys, double zs,
    const double vel,
    double *__restrict__ ttimes);
float *double2FloatArray(const int n, double *__restrict__ x);

/*!
 * @brief Homogeneous test case for earthquake location.
 *
 * End-to-end MPI driver: builds a synthetic station layout and event
 * catalog on the master rank, broadcasts them, computes analytic
 * constant-velocity traveltime tables, round-trips them through HDF5,
 * then runs the 3-D grid-search locator.
 */
int main(int argc, char **argv)
{
    const char *fcnm = "xhomog\0";
    const char *projnm = "homog\0";
    char ttimeScratchFile[PATH_MAX];
    MPI_Comm globalComm, intraTableComm, interTableComm;
    struct mceik_catalog_struct catalog;
    struct mceik_stations_struct stations;
    double *ttimes;
    float *ttimes4;
    int *vpmod, *vsmod;
    double dist, dx, dy, dz, velUse, x0, x0Loc, x1, y0, y0Loc, y1,
           z0, z0Loc, z1;
    int myid, nprocs, nx, ny, nz;
    int imbx, imby, imbz;
    int *tableToStation, *tablePhase;
    int i, ierr, iphase, itable, ix, ix0, ix1, iy, iy0, iy1, iz0, iz1, k,
        nevents, nmodels, nxrec, nyrec, ndivx, ndivy, ndivz,
        ndx, ndy, ndz, nkeep, ntables, nwork, nxLoc, nyLoc, nzLoc;
    int globalCommInt, intraTableCommInt, interTableCommInt;
    bool lsaveScratch;
    hid_t locFileID, tttFileID;
    const double const_vp = 2000.0; // Slower is harder for the solver
    const double const_vs = const_vp/sqrt(3.0);
    const double varVp = 0.25;
    const double varVs = 0.25;
    const int master = 0;
    const int model = 1;
    const int ireord = 1; // Reorder the communicator
    const int iwt = 0;    // Dont weight
    const int locJob = 2;
    //------------------------------------------------------------------------//
    //
    // Initialize mpi
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    // initializations
    memset(&catalog, 0, sizeof(struct mceik_catalog_struct));
    memset(&stations, 0, sizeof(struct mceik_stations_struct));
    vpmod = NULL;
    vsmod = NULL;
    // Model box extents (m), grid spacing (m), and domain decomposition
    x0 = 0.0;
    y0 = 0.0;
    z0 = 0.0;
    x1 = 31.e3;
    y1 = 28.e3;
    z1 = 25.e3;
    dx = 1000.0;
    dy = 1000.0;
    dz = 1000.0;
    ndivx = 2;
    ndivy = 1;
    ndivz = 1;
    nmodels = 1;
    lsaveScratch = false;
    // Number of grid points in each direction (inclusive of both ends)
    nx = (int) ((x1 - x0)/dx + 0.5) + 1;
    ny = (int) ((y1 - y0)/dy + 0.5) + 1;
    nz = (int) ((z1 - z0)/dz + 0.5) + 1;
    if (myid == master){printf("%s: Splitting the commuicator...\n", fcnm);}
    // mpiutils works with Fortran communicator handles
    globalCommInt = (int) (MPI_Comm_c2f(MPI_COMM_WORLD));
    mpiutils_initialize3d(&globalCommInt, &ireord, &iwt,
                          &ndivx, &ndivy, &ndivz, &ierr);
    if (ierr != 0)
    {
        printf("%s: Error splitting the communicators!\n", fcnm);
        MPI_Abort(MPI_COMM_WORLD, 30);
    }
    // get the communicator IDs
    mpiutils_getCommunicators(&globalCommInt,
                              &intraTableCommInt,
                              &interTableCommInt, &ierr);
    if (ierr != 0)
    {
        printf("%s: Error getting communicators\n", fcnm);
        MPI_Abort(MPI_COMM_WORLD, 30);
    }
    globalComm = MPI_Comm_f2c((MPI_Fint) globalCommInt);
    intraTableComm = MPI_Comm_f2c((MPI_Fint) intraTableCommInt);
    interTableComm = MPI_Comm_f2c((MPI_Fint) interTableCommInt);
    MPI_Barrier(globalComm);
    // The synthetic experiment (stations + catalog) is built on master only
    // and later distributed with broadcast_stations/broadcast_catalog.
    if (myid == master)
    {
        srand(2016); //srand(time(0)); // will create random number
        nevents = 4;
        // Scatter the receivers across the free surface
        printf("%s: Creating station locations...\n", fcnm);
        nxrec = 2;
        nyrec = 3;
        stations.nstat = nxrec*nyrec;
        stations.lcartesian = 1; // Cartesian system
        stations.netw = (char **)calloc(stations.nstat, sizeof(char *));
        stations.stnm = (char **)calloc(stations.nstat, sizeof(char *));
        stations.chan = (char **)calloc(stations.nstat, sizeof(char *));
        stations.loc  = (char **)calloc(stations.nstat, sizeof(char *));
        for (i=0; i<stations.nstat; i++)
        {
            stations.netw[i] = (char *)calloc(64, sizeof(char));
            stations.stnm[i] = (char *)calloc(64, sizeof(char));
            stations.chan[i] = (char *)calloc(64, sizeof(char));
            stations.loc[i]  = (char *)calloc(64, sizeof(char));
            strcpy(stations.netw[i], "NA\0");
            sprintf(stations.stnm[i], "RC%d", i+1);
            strcpy(stations.chan[i], "HH?\0");
            strcpy(stations.loc[i], "00\0");
        }
        // Randomly scatter stations
        stations.xrec = (double *)calloc(stations.nstat, sizeof(double));
        stations.yrec = (double *)calloc(stations.nstat, sizeof(double));
        stations.zrec = (double *)calloc(stations.nstat, sizeof(double));
        for (iy=0; iy<nyrec; iy++)
        {
            for (ix=0; ix<nxrec; ix++)
            {
                // Snap random station positions onto grid nodes; depth is
                // fixed at z1 (the free surface in this convention).
                stations.xrec[iy*nxrec+ix] = x0
                    + ((int) ((double) rand()/RAND_MAX*(nx - 1)))*dx;
                stations.yrec[iy*nxrec+ix] = y0
                    + ((int) ((double) rand()/RAND_MAX*(ny - 1)))*dy;
                //stations.xrec[iy*nxrec+ix] = x0 + (nx/4 + ix)*dx;
                //stations.yrec[iy*nxrec+ix] = y0 + (ny/4 + iy)*dy;
                stations.zrec[iy*nxrec+ix] = z1;
                if (stations.xrec[iy*nxrec+ix] < x0 ||
                    stations.xrec[iy*nxrec+ix] > x1 ||
                    stations.yrec[iy*nxrec+ix] < y0 ||
                    stations.yrec[iy*nxrec+ix] > y1 ||
                    stations.zrec[iy*nxrec+ix] < z0 ||
                    stations.zrec[iy*nxrec+ix] > z1)
                {
                    printf("%s: Station out of bounds!\n", fcnm);
                    MPI_Abort(MPI_COMM_WORLD, 20);
                }
                printf("%f %f %f\n", stations.xrec[iy*nxrec+ix],
                                     stations.yrec[iy*nxrec+ix],
                                     stations.zrec[iy*nxrec+ix]);
            }
        }
        stations.pcorr = (double *) calloc((size_t) stations.nstat, sizeof(double));
        stations.scorr = (double *) calloc((size_t) stations.nstat, sizeof(double));
        stations.lhasP = (int *) calloc((size_t) stations.nstat, sizeof(int));
        stations.lhasS = (int *) calloc((size_t) stations.nstat, sizeof(int));
        // Make some events
        printf("%s: Creating events...\n", fcnm);
        catalog.nevents = nevents;
        // Worst-case observation count: P and S at every station per event
        nwork = 2*catalog.nevents*stations.nstat;
        catalog.xsrc = (double *) calloc((size_t) catalog.nevents, sizeof(double));
        catalog.ysrc = (double *) calloc((size_t) catalog.nevents, sizeof(double));
        catalog.zsrc = (double *) calloc((size_t) catalog.nevents, sizeof(double));
        catalog.tori = (double *) calloc((size_t) catalog.nevents, sizeof(double));
        catalog.tobs = (double *) calloc((size_t) nwork, sizeof(double));
        catalog.test = (double *) calloc((size_t) nwork, sizeof(double));
        catalog.varObs = (double *) calloc((size_t) nwork, sizeof(double));
        catalog.luseObs = (int *) calloc((size_t) nwork, sizeof(int));
        catalog.pickType = (int *) calloc((size_t) nwork, sizeof(int));
        catalog.statPtr = (int *) calloc((size_t) nwork, sizeof(int));
        catalog.obsPtr = (int *) calloc((size_t) catalog.nevents + 1, sizeof(int));
        nkeep = 0;
        for (i=0; i<catalog.nevents; i++)
        {
            catalog.xsrc[i] = x0 + (x1 - x0)*(double) rand()/RAND_MAX;
            catalog.ysrc[i] = y0 + (y1 - y0)*(double) rand()/RAND_MAX;
            catalog.zsrc[i] = z0 + (z1 - z0)*(double) rand()/RAND_MAX;
            // Now attach some theoreticals
            for (k=0; k<stations.nstat; k++)
            {
                // Exact straight-ray traveltime in a homogeneous medium
                dist = sqrt( pow(stations.xrec[k] - catalog.xsrc[i], 2)
                           + pow(stations.yrec[k] - catalog.ysrc[i], 2)
                           + pow(stations.zrec[k] - catalog.zsrc[i], 2) );
                for (iphase=1; iphase<=2; iphase++)
                {
                    if (iphase == P_PRIMARY_PICK)
                    {
                        catalog.tobs[nkeep] = dist/const_vp;
                        catalog.varObs[nkeep] = varVp;
                    }
                    else if (iphase == S_PRIMARY_PICK)
                    {
                        catalog.tobs[nkeep] = dist/const_vs;
                        catalog.varObs[nkeep] = varVs;
                    }
                    else
                    {
                        printf("%s: Invalid phase %d\n", fcnm, iphase);
                        MPI_Abort(MPI_COMM_WORLD, 30);
                    }
                    catalog.luseObs[nkeep] = 1;
                    catalog.pickType[nkeep] = iphase;
                    // statPtr is a 1-based station pointer (Fortran style)
                    catalog.statPtr[nkeep] = k + 1;
                    nkeep = nkeep + 1;
                }
                // Do i have a P and S phase?
                if (i == 0)
                {
                    stations.lhasP[k] = 1;
                    stations.lhasS[k] = 1;
                }
            }
            catalog.obsPtr[i+1] = nkeep;
        }
    } // Cook up a list of bogus earthquakes
    // Initialize the eikonal solver
    if (myid == master)
    {
        printf("%s: Generating constant velocity model...\n", fcnm);
        vpmod = (int *)calloc((size_t) (nx*ny*nz), sizeof(int));
        vsmod = (int *)calloc((size_t) (nx*ny*nz), sizeof(int));
        createHomogeneousModel(nx, ny, nz, const_vp, vpmod);
        createHomogeneousModel(nx, ny, nz, const_vs, vsmod);
    }
    else
    {
        vpmod = (int *)calloc((size_t) (nx*ny*nz), sizeof(int));
        vsmod = (int *)calloc((size_t) (nx*ny*nz), sizeof(int));
    }
    // Distribute the inversion model to all
    // NOTE(review): MPI_INTEGER is the Fortran integer type; the C buffers
    // here are int, so MPI_INT looks like the intended datatype — confirm.
    MPI_Bcast(vpmod, nx*ny*nz, MPI_INTEGER, master, MPI_COMM_WORLD);
    MPI_Bcast(vsmod, nx*ny*nz, MPI_INTEGER, master, MPI_COMM_WORLD);
    // Distribute the station information
    broadcast_stations(MPI_COMM_WORLD, master, &stations);
    // Distribute the catalog
    broadcast_catalog(MPI_COMM_WORLD, master, &catalog);
    // Make the local model: map this rank to its (imbx,imby,imbz) block and
    // carve out the [ix0,ix1) x [iy0,iy1) x [iz0,iz1) sub-grid.
    ix0 = 0;
    iy0 = 0;
    iz0 = 0;
    mpiutils_grd2ijk(&myid, &ndivx, &ndivy, &ndivz,
                     &imbx, &imby, &imbz, &ierr);
    if (ierr != 0){printf("%s: Failed to map rank to block\n", fcnm);}
    ndx = fmax(nx/ndivx, 1);
    ndy = fmax(ny/ndivy, 1);
    ndz = fmax(nz/ndivz, 1);
    ix0 = imbx*ndx;
    iy0 = imby*ndy;
    iz0 = imbz*ndz;
    ix1 = (imbx + 1)*ndx;
    iy1 = (imby + 1)*ndy;
    iz1 = (imbz + 1)*ndz;
    // Last block in each direction absorbs the remainder
    if (imbx + 1 == ndivx){ix1 = nx;}
    if (imby + 1 == ndivy){iy1 = ny;}
    if (imbz + 1 == ndivz){iz1 = nz;}
    nxLoc = ix1 - ix0;
    nyLoc = iy1 - iy0;
    nzLoc = iz1 - iz0;
    x0Loc = x0 + ix0*dx;
    y0Loc = y0 + iy0*dy;
    z0Loc = z0 + iz0*dz;
    // Initialize the HDF5 traveltime file (one scratch file per table group)
    int myTableID;
    MPI_Comm_rank(interTableComm, &myTableID);
    memset(ttimeScratchFile, 0, sizeof(ttimeScratchFile));
    sprintf(ttimeScratchFile, "%s_%d", projnm, myTableID+1);
    ierr = eikonal_h5io_initTTables(intraTableComm, //MPI_COMM_WORLD,
                                    "./\0",
                                    ttimeScratchFile, //"test\0",
                                    ix0, iy0, iz0,
                                    nx, ny, nz,
                                    nxLoc, nyLoc, nzLoc,
                                    nmodels, stations.nstat,
                                    lsaveScratch,
                                    x0, y0, z0,
                                    dx, dy, dz,
                                    &tttFileID);
    if (ierr != 0)
    {
        printf("%s: Error initializing H5 file\n", fcnm);
        return EXIT_FAILURE;
    }
    // Determine how many traveltime tables to make
    ntables = 0;
    for (i=0; i<stations.nstat; i++)
    {
        if (stations.lhasP[i] == 1){ntables = ntables + 1;}
        if (stations.lhasS[i] == 1){ntables = ntables + 1;}
    }
    tableToStation = (int *) calloc((size_t) ntables, sizeof(int));
    tablePhase = (int *) calloc((size_t) ntables, sizeof(int));
    itable = 0;
    for (i=0; i<stations.nstat; i++)
    {
        if (stations.lhasP[i] == 1)
        {
            tableToStation[itable] = i + 1; // 1-based station number
            tablePhase[itable] = 1;
            itable = itable + 1;
        }
        if (stations.lhasS[i] == 1)
        {
            tableToStation[itable] = i + 1;
            tablePhase[itable] = 2;
            itable = itable + 1;
        }
    }
    if (myid == master)
    {
        printf("%s: Will compute %d travel time tables\n", fcnm, ntables);
    }
    // This is the parallel loop on tables
    ttimes = (double *) calloc((size_t) nxLoc*nyLoc*nzLoc, sizeof(double));
    MPI_Barrier(MPI_COMM_WORLD);
    for (itable=0; itable<ntables; itable++)
    {
        iphase = tablePhase[itable];
        k = tableToStation[itable];
        velUse = const_vp;
        if (iphase == S_PRIMARY_PICK)
        {
            velUse = const_vs;
        }
        else
        {
            if (iphase != P_PRIMARY_PICK)
            {
                printf("%s: Invalid phase\n", fcnm);
                MPI_Abort(MPI_COMM_WORLD, 30);
            }
        }
        // Compute traveltimes from station to all points in medium
        // NOTE(review): k here is the 1-based station number stored in
        // tableToStation, yet it indexes the 0-based xrec/yrec/zrec arrays;
        // this looks off by one (reads past the last station when
        // k == nstat) — confirm whether the index should be k - 1.
        ierr = computeHomogeneousTraveltimes(nxLoc, nyLoc, nzLoc,
                                             x0Loc, y0Loc, z0Loc,
                                             dx, dy, dz,
                                             stations.xrec[k],
                                             stations.yrec[k],
                                             stations.zrec[k],
                                             velUse, ttimes);
        if (ierr != 0)
        {
            printf("%s: Error computing homogeneous traveltimes\n", fcnm);
            MPI_Abort(MPI_COMM_WORLD, 30);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        // Convert to float
        ttimes4 = double2FloatArray(nxLoc*nyLoc*nzLoc, ttimes);
        // Save the data
        ierr = eikonal_h5io_writeTravelTimes(intraTableComm, //MPI_COMM_WORLD,
                                             tttFileID,
                                             k, model, iphase,
                                             ix0, iy0, iz0,
                                             nxLoc, nyLoc, nzLoc,
                                             ttimes4);
        if (ierr != 0)
        {
            printf("%s: Failed writing traveltimes %d\n", fcnm, myid);
            MPI_Abort(MPI_COMM_WORLD, 30);
        }
        // Verify: read the table back and compare with what was written
        memset(ttimes4, 0, (size_t) (nxLoc*nyLoc*nzLoc)*sizeof(float));
        ierr = eikonal_h5io_readTravelTimes(intraTableComm, //MPI_COMM_WORLD,
                                            tttFileID,
                                            k, model, iphase,
                                            ix0, iy0, iz0,
                                            nxLoc, nyLoc, nzLoc,
                                            ttimes4);
        if (ierr != 0)
        {
            printf("%s: Error loading traveltimes\n", fcnm);
            MPI_Abort(MPI_COMM_WORLD, 30);
        }
        double difMax = 0.0;
        int in;
        for (in=0; in<nxLoc*nyLoc*nzLoc; in++)
        {
            difMax = fmax(difMax, fabs(ttimes4[in] - (float) ttimes[in]));
        }
        if (difMax > 1.e-5)
        {
            printf("%s: Failed to read/write traveltime verification\n", fcnm);
            MPI_Abort(MPI_COMM_WORLD, 30);
        }
        free(ttimes4);
    }
    free(ttimes);
    // Initialize the jPDF file
    // NOTE(review): opened on MPI_COMM_WORLD but finalized below with
    // intraTableComm — confirm the intended communicator.
    ierr = eikonal_h5io_initLocations(MPI_COMM_WORLD,
                                      "./\0", projnm,
                                      ix0, iy0, iz0,
                                      nx, ny, nz,
                                      nxLoc, nyLoc, nzLoc,
                                      nmodels, catalog.nevents,
                                      x0, y0, z0,
                                      dx, dy, dz,
                                      &locFileID);
    // I am now ready to locate some earthquakes
    int iverb = 0;
    // NOTE(review): hid_t* is cast to long*; this assumes sizeof(hid_t) ==
    // sizeof(long) (true for 64-bit hid_t on LP64) — confirm on the targets.
    locate3d_initialize(&intraTableComm, &iverb,
                        (long *) &tttFileID, (long *) &locFileID,
                        &ndivx, &ndivy, &ndivz, &ierr);
    if (ierr != 0)
    {
        printf("%s: Failed to initialize locator\n", fcnm);
        MPI_Abort(MPI_COMM_WORLD, 30);
    }
    // Call the locator
    double *hypo = (double *)calloc((size_t) catalog.nevents*4, sizeof(double));
    int nobs = 2*stations.nstat;
    double *statCor = (double *)calloc((size_t) nobs, sizeof(double));
    locate3d_gridsearch(&model, &locJob, &nobs, &catalog.nevents,
                        catalog.luseObs, catalog.statPtr,
                        catalog.pickType, statCor,
                        catalog.tori, catalog.varObs,
                        catalog.tobs, catalog.test,
                        hypo, &ierr);
    // Finalize
    // NOTE(review): hypo, statCor, tableToStation, and tablePhase are never
    // freed; harmless at process exit but worth cleaning up.
    locate3d_finalize();
    eikonal_h5io_finalize(intraTableComm, &tttFileID);
    eikonal_h5io_finalize(intraTableComm, &locFileID);
    freeStations(&stations);
    freeCatalog(&catalog);
    if (vpmod != NULL){free(vpmod);}
    if (vsmod != NULL){free(vsmod);}
    mpiutils_finalize();
    MPI_Finalize();
    return EXIT_SUCCESS;
}
//============================================================================//
/*!
* @brief Frees memory on the catalog structure * * @param[in,out] catalog on exit all memory on catalog has been freed and * any scalars have been nulled out * * @author Ben Baker * */ void freeCatalog(struct mceik_catalog_struct *catalog) { if (catalog->xsrc != NULL){free(catalog->xsrc);} if (catalog->ysrc != NULL){free(catalog->ysrc);} if (catalog->zsrc != NULL){free(catalog->zsrc);} if (catalog->tori != NULL){free(catalog->tori);} if (catalog->tobs != NULL){free(catalog->tobs);} if (catalog->test != NULL){free(catalog->test);} if (catalog->varObs != NULL){free(catalog->varObs);} if (catalog->luseObs != NULL){free(catalog->luseObs);} if (catalog->pickType != NULL){free(catalog->pickType);} if (catalog->statPtr != NULL){free(catalog->statPtr);} if (catalog->obsPtr != NULL){free(catalog->obsPtr);} memset(catalog, 0, sizeof(struct catalog_struct)); return; } //============================================================================// /*! * @brief Frees the station structure * * @param[in,out] station on input contains the station list. * on output all memory has been released from * the station list and it has been reset. 
* * @author Ben Baker * */ void freeStations(struct mceik_stations_struct *stations) { int i; for (i=0; i<stations->nstat; i++) { if (stations->netw != NULL) { if (stations->netw[i] != NULL){free(stations->netw[i]);} } if (stations->stnm != NULL) { if (stations->stnm[i] != NULL){free(stations->stnm[i]);} } if (stations->chan != NULL) { if (stations->chan[i] != NULL){free(stations->chan[i]);} } if (stations->loc != NULL) { if (stations->loc[i] != NULL){free(stations->loc[i]);} } } if (stations->netw != NULL){free(stations->netw);} if (stations->stnm != NULL){free(stations->stnm);} if (stations->chan != NULL){free(stations->chan);} if (stations->loc != NULL){free(stations->loc);} if (stations->xrec != NULL){free(stations->xrec);} if (stations->yrec != NULL){free(stations->yrec);} if (stations->zrec != NULL){free(stations->zrec);} if (stations->pcorr != NULL){free(stations->pcorr);} if (stations->scorr != NULL){free(stations->scorr);} if (stations->lhasP != NULL){free(stations->lhasP);} if (stations->lhasS != NULL){free(stations->lhasS);} memset(stations, 0, sizeof(struct mceik_stations_struct)); return; } //============================================================================// /*! * @brief Sets a homogeneous model * * @param[in] nx number of x grid points in grid * @param[in] ny number of y grid points in grid * @param[in] nz number of z grid points in grid * @param[in] vel_ms velocity (m/s) * * @param[in] vmod const velocity model [nz x ny x nx] * * @author Ben Baker * */ static void createHomogeneousModel(const int nx, const int ny, const int nz, const double vel_ms, int *__restrict__ vmod) { int indx, ix, iy, iz, nxy; nxy = nx*ny; for (iz=0; iz<nz; iz++) { for (iy=0; iy<ny; iy++) { for (ix=0; ix<nx; ix++) { indx = iz*nxy + iy*nx + ix; vmod[indx] = (int) vel_ms; } } } return; } //============================================================================// /*! 
* @brief Computes the traveltimes to all points in a constant gridded * velocity model * * @param[in] nx number of x grid points in model * @param[in] ny number of y grid points in model * @param[in] nz number of z grid points in model * @param[in] x0 x origin (m) * @param[in] y0 y origin (m) * @param[in] z0 z origin (m) * @param[in] dx grid spacing in x (m) * @param[in] dy grid spacing in y (m) * @param[in] dz grid spacing in z (m) * @param[in] xs x source position (m) * @param[in] ys y source position (m) * @param[in] zs z source position (m) * @param[in] vel constant medium velocity (m/s) * * @param[out] ttimes traveltimes (s) at each point in medium [nx*ny*nz]. * the k'th index for the (ix,iy,iz)'th grid point * is accessed by k = iz*nx*ny + iy*nx + ix for * ix=0,1,...,nx-1, iy=0,1,...,ny-1, iz=0,1,...,nz-1. * * @result 0 indicates success. * * @author Ben Baker * */ int computeHomogeneousTraveltimes( const int nx, const int ny, const int nz, double x0, double y0, double z0, const double dx, double dy, const double dz, const double xs, const double ys, double zs, const double vel, double *__restrict__ ttimes) { double dist, slow, x, y, z; int indx, ix, iy, iz, nxy; nxy = nx*ny; slow = 1.0/vel; for (iz=0; iz<nz; iz++) { for (iy=0; iy<ny; iy++) { for (ix=0; ix<nx; ix++) { x = x0 + (double) ix*dx; y = y0 + (double) iy*dy; z = z0 + (double) iz*dz; dist = sqrt(pow(xs-x, 2) + pow(ys-y, 2) + pow(zs-z, 2)); indx = iz*nxy + iy*nx + ix; ttimes[indx] = dist*slow; } } } return 0; } //============================================================================// float *double2FloatArray(const int n, double *__restrict__ x) { float *x4; int i; x4 = (float *) calloc((size_t) n, sizeof(float)); #pragma omp simd for (i=0; i<n; i++) { x4[i] = (float) x[i]; } return x4; }
GB_unop__exp2_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__exp2_fc64_fc64
// op(A') function:  GB_unop_tran__exp2_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_cexp2 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_cexp2 (x) ;

// casting (identity cast here: A and C are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_cexp2 (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__exp2_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexp2 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cexp2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__exp2_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
par_gsmg.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Geometrically smooth interpolation multigrid
 *
 *****************************************************************************/

#include <stdio.h>
#include <math.h>

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"

#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

/* Euclidean norm of x[0..n-1]. */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Real temp = 0.;
   HYPRE_Int i;

   for (i=0; i<n; i++)
      temp = temp + x[i]*x[i];
   return sqrt(temp);
}

/* Scale x[0..n-1] in place by the scalar a. */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int i;

   for (i=0; i<n; i++)
      x[i] = a * x[i];
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFillSmooth
 * - fill in smooth matrix
 * - this function will scale the smooth vectors
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S,
                             hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions,
                             HYPRE_Int *dof_func)
{
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;
   HYPRE_Real *p_offd;
   HYPRE_Real *p_ptr;
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif

   /* normalize each sample vector and divide by number of samples */
   /* NOTE(review): nm can be zero if a sample vector is identically zero;
      1./nm would then divide by zero — confirm callers guarantee nonzero
      samples. */
   for (k=0; k<nsamples; k++)
   {
      nm = mydnrm2(n, samples+k*n);
      nm = 1./nm/nsamples;
      mydscal(n, nm, samples+k*n);
   }

   /* Exchange the off-processor parts of each (scaled) sample vector so that
      p_offd holds, per sample, the values for the external columns of S. */
   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   buf_data = hypre_CTAlloc(HYPRE_Real,
                            hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                            HYPRE_MEMORY_HOST);
   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd;   /* remember the base pointer for later reuse and free */
   p = samples;
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      /* job type 1 exchanges HYPRE_Real data; destroy waits for completion */
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, p_offd);

      hypre_ParCSRCommHandleDestroy(comm_handle);
      p = p+n;
      p_offd = p_offd+num_cols_offd;
   }

   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);

   if (num_functions > 1)
   {
      /* also exchange the dof -> function map for the external columns */
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

      int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      /* job type 11 exchanges HYPRE_Int data */
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);

      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /* Fill S with the reciprocal of the summed sample differences.
      NOTE(review): A_diag_data/A_offd_data are indexed with S's CSR offsets
      (j), which assumes S and A have identical sparsity patterns — confirm;
      this holds if S was created as a structural copy of A. */
   for (i = 0; i < n; i++)
   {
      /* diag part: skip S_diag_i[i] itself (the diagonal entry) */
      for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
      {
         ii = S_diag_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_diag_data[j] = temp;
      }
      /* offd part: compare against the received external sample values */
      for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
      {
         ii = S_offd_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif

   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);
   if (num_functions > 1)
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixChooseThresh
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j;
   HYPRE_Real mx, minimax = 1.e+10;
   HYPRE_Real minmin;

   /* per-row maximum over diag and offd entries */
   for (i=0; i<n; i++)
   {
      mx = 0.;
      for (j=S_diag_i[i]; j<S_diag_i[i+1]; j++)
         mx = hypre_max(mx, S_diag_data[j]);
      for (j=S_offd_i[i]; j<S_offd_i[i+1]; j++)
         mx = hypre_max(mx, S_offd_data[j]);

      if (mx != 0.)
         minimax = hypre_min(minimax, mx);
   }

   /* global minimum of the per-row maxima over all processes */
   hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm);

   return minmin;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixThreshold
 *
 * Drop, in place, every entry of A whose value is < thresh, compressing
 * the diag and offd CSR arrays; the old (larger) arrays are freed and
 * replaced.  The comparison is on the raw value, not |value| -- entries
 * are expected to be nonnegative, as produced by
 * hypre_ParCSRMatrixFillSmooth.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_nonzeros_diag = A_diag_i[n];
   HYPRE_Int num_nonzeros_offd = A_offd_i[n];

   /* replacement (compressed) CSR arrays */
   HYPRE_Int  *S_diag_i;
   HYPRE_Int  *S_diag_j;
   HYPRE_Real *S_diag_data;
   HYPRE_Int  *S_offd_i;
   HYPRE_Int  *S_offd_j;
   HYPRE_Real *S_offd_data;

   HYPRE_Int count, i, jS, jA;

   /* first count the number of nonzeros we will need */
   count = 0;
   for (i=0; i<num_nonzeros_diag; i++)
      if (A_diag_data[i] >= thresh)
         count++;

   /* allocate vectors */
   S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);

   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (A_diag_data[jA] >= thresh)
         {
            S_diag_data[jS] = A_diag_data[jA];
            S_diag_j[jS] = A_diag_j[jA];
            jS++;
         }
      }
   }
   S_diag_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_diag) = jS;

   /* free the vectors we don't need */
   hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);

   /* assign the new vectors */
   hypre_CSRMatrixI(A_diag) = S_diag_i;
   hypre_CSRMatrixJ(A_diag) = S_diag_j;
   hypre_CSRMatrixData(A_diag) = S_diag_data;

   /*
    * Offd part
    */

   /* first count the number of nonzeros we will need */
   count = 0;
   for (i=0; i<num_nonzeros_offd; i++)
      if (A_offd_data[i] >= thresh)
         count++;

   /* allocate vectors */
   S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);

   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (A_offd_data[jA] >= thresh)
         {
            S_offd_data[jS] = A_offd_data[jA];
            S_offd_j[jS] = A_offd_j[jA];
            jS++;
         }
      }
   }
   S_offd_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_offd) = jS;

   /* free the vectors we don't need */
   hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);

   /* assign the new vectors */
   hypre_CSRMatrixI(A_offd) = S_offd_i;
   hypre_CSRMatrixJ(A_offd) = S_offd_j;
   hypre_CSRMatrixData(A_offd) = S_offd_data;

   return 0;
}

/*--------------------------------------------------------------------------
 * CreateSmoothVecs
 * - smoother depends on the level being used
 *
 * Generates nsamples "smooth vectors" by applying num_sweeps relaxation
 * sweeps (or the level's Schwarz smoother, when configured) to random
 * initial guesses with a zero right-hand side.  The vectors are returned
 * in *SmoothVecs_p as one contiguous array of nsamples blocks of local
 * length n_local; caller owns the allocation.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void         *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int     num_sweeps,
                                HYPRE_Int     level,
                                HYPRE_Real  **SmoothVecs_p)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;

   MPI_Comm             comm     = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix     *A_diag   = hypre_ParCSRMatrixDiag(A);

   hypre_ParVector *Zero;           /* zero right-hand side */
   hypre_ParVector *Temp;           /* relaxation workspace */
   hypre_ParVector *U;              /* current iterate / smooth vector */
   hypre_ParVector *Qtemp = NULL;   /* extra workspace, threaded runs only */

   HYPRE_Int    i;
   HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int    n_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A);

   HYPRE_Int  sample;
   HYPRE_Int  nsamples = hypre_ParAMGDataNumSamples(amg_data);
   HYPRE_Int  ret;
   HYPRE_Real *datax, *bp, *p;

   HYPRE_Int rlx_type;
   HYPRE_Int smooth_type;
   HYPRE_Int smooth_option = 0;
   HYPRE_Int smooth_num_levels;
   HYPRE_Solver *smoother;

   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Int num_threads;

   num_threads = hypre_NumThreads();

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (debug_flag >= 1)
      hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n",
                   num_sweeps, nsamples);

   /* if a complex smoother is configured for this level, use it instead
      of plain relaxation (smooth_option == 6 selects Schwarz below) */
   smooth_type = hypre_ParAMGDataSmoothType(amg_data);
   smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
   if (smooth_num_levels > level)
   {
      smooth_option = smooth_type;
      smoother = hypre_ParAMGDataSmoother(amg_data);
      num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
   }

   rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
   /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
   /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */

   /* generate par vectors */

   Zero = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Zero,0);
   hypre_ParVectorInitialize(Zero);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;

   Temp = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Temp,0);
   hypre_ParVectorInitialize(Temp);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;

   U = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(U,0);
   hypre_ParVectorInitialize(U);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(U));

   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(comm, n, starts);
      hypre_ParVectorInitialize(Qtemp);
      hypre_ParVectorSetPartitioningOwner(Qtemp,0);
   }

   /* allocate space for the vectors */
   bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST);
   p = bp;

   /* generate random vectors */
   for (sample=0; sample<nsamples; sample++)
   {
      /* random initial guess in U, zero RHS */
      for (i=0; i<n_local; i++)
         datax[i] = hypre_Rand() - .5;

      for (i=0; i<num_sweeps; i++)
      {
         if (smooth_option == 6)
         {
            HYPRE_SchwarzSolve(smoother[level],
                               (HYPRE_ParCSRMatrix) A,
                               (HYPRE_ParVector) Zero,
                               (HYPRE_ParVector) U);
         }
         else
         {
            ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
                                       rlx_type , 0 /*rel pts*/, 1.0 /*weight*/,
                                       1.0 /*omega*/, NULL, U, Temp, Qtemp);
            hypre_assert(ret == 0);
         }
      }

      /* copy out the solution */
      for (i=0; i<n_local; i++)
         *p++ = datax[i];
   }

   hypre_ParVectorDestroy(Zero);
   hypre_ParVectorDestroy(Temp);
   hypre_ParVectorDestroy(U);

   if (num_threads > 1)
      hypre_ParVectorDestroy(Qtemp);

   *SmoothVecs_p = bp;

   return 0;
}

/*--------------------------------------------------------------------------
 * CreateSmoothDirs replaces CreateS in AMG
 * - smoother depends on the level being used
 * - in this version, CreateSmoothVecs must be called prior to this function
 *
 * Builds the strength matrix S: clones A, fills it with reciprocal
 * smooth-vector differences (hypre_ParCSRMatrixFillSmooth), picks a
 * global threshold (hypre_ParCSRMatrixChooseThresh), and drops/compresses
 * entries below thresh*minimax (hypre_ParCSRMatrixThreshold).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   hypre_ParCSRMatrix *S;
   HYPRE_Real minimax;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);

   S = hypre_ParCSRMatrixClone(A, 0);

   /* Traverse S and fill in differences */
   hypre_ParCSRMatrixFillSmooth(
      hypre_ParAMGDataNumSamples(amg_data), SmoothVecs,
      S, A, num_functions, dof_func);

   minimax = hypre_ParCSRMatrixChooseThresh(S);
   if (debug_flag >= 1)
      hypre_printf("Minimax chosen: %f\n", minimax);

   /* Threshold and compress */
   hypre_ParCSRMatrixThreshold(S, thresh*minimax);

   *S_ptr = S;

   return 0;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGNormalizeVecs
 *
 * Normalize the smooth vectors and also make the first vector the constant
 * vector
 *
 * inputs:
 * n = length of smooth vectors
 * num = number of smooth vectors
 * V = smooth vectors (array of length n*num), also an output
 *
 * output:
 * V = adjusted smooth vectors
 *
 * NOTE(review): normalization divides by the 2-norm with no zero check;
 * a zero vector among V[1..num-1] would cause a division by zero.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
   HYPRE_Int i, j;
   HYPRE_Real nrm;

   /* change first vector to the constant vector */
   for (i=0; i<n; i++)
      V[i] = 1.0;

   /* scale every vector to unit 2-norm */
   for (j=0; j<num; j++)
   {
      nrm = mydnrm2(n, &V[j*n]);
      mydscal(n, 1./nrm, &V[j*n]);
   }

   return 0;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGFitVectors
 *
 * Construct interpolation weights based on fitting smooth vectors
 *
 * inputs:
 * ip = row number of row in P being processed (0-based)
 * n = length of smooth vectors
 * num = number of smooth vectors
 * V = smooth vectors (array of length n*num), also an output
 * nc = number of coarse grid points
 * ind = indices of coarse grid points (0-based)
 *
 * output:
 * val = interpolation weights for the coarse grid points
 * V = smooth vectors; first one has been changed to constant vector;
 *     vectors have also been normalized; this is also an input
 *
 * Solves the num x nc least-squares problem  A w = b  with LAPACK dgels,
 * where column j of A is (V_k[ind[j]])_k and b = (V_k[ip])_k.
 * NOTE(review): the LAPACK workspace size is a fixed 2000*64 reals with
 * no query of the optimal size -- confirm it suffices for large num/nc.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num,
                          const HYPRE_Real *V,
                          HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val)
{
   HYPRE_Real *a, *b;
   HYPRE_Real *ap;
   HYPRE_Int i, j;
   HYPRE_Real *work;
   HYPRE_Int work_size;
   HYPRE_Int info;
   HYPRE_Int temp;

   /*
      hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
      for (i=0; i<nc; i++)
         hypre_printf("%d ", ind[i]);
      hypre_printf("\n");
   */

   if (nc == 0)
      return 0;

   work_size = 2000*64;
   work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);

   /* a: column-major num x nc matrix of smooth-vector values at the
      coarse points */
   a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST);
   ap = a;

   for (j=0; j<nc; j++)
   {
      for (i=0; i<num; i++)
      {
         *ap = V[i*n+ind[j]];
         ap++;
      }
   }

   /* b must be long enough for both the RHS (num) and the solution (nc) */
   temp = MAX(nc, num);
   b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
   for (i=0; i<num; i++)
      b[i] = V[i*n+ip];

   {
      char trans = 'N';
      HYPRE_Int one = 1;
      hypre_dgels(&trans, &num, &nc, &one, a, &num,
                  b, &temp, work, &work_size,
                  &info);

      if (info != 0)
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned %d\n");
      /* NOTE(review): the "%d" above is never substituted --
         hypre_error_w_msg is given a plain string here and 'info' is not
         formatted into it; the message is emitted literally. */

      /* copy solution into output vector */
      for (j=0; j<nc; j++)
         val[j] = b[j];
   }

   hypre_TFree(b, HYPRE_MEMORY_HOST);
   hypre_TFree(a, HYPRE_MEMORY_HOST);
   hypre_TFree(work, HYPRE_MEMORY_HOST);

   return info;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpLS
 *
 * Interpolation built from fitting smooth vectors
 * - sequential version only
 *
 * For each C-point, P is the identity; for each F-point, the weights to
 * its strong C-neighbors (rows of S) are computed by least-squares
 * fitting of the smooth vectors (hypre_BoomerAMGFitVectors).  The
 * off-diagonal (off-processor) part of P is not built ("undone" below).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix   *A,
                              HYPRE_Int            *CF_marker,
                              hypre_ParCSRMatrix   *S,
                              HYPRE_BigInt         *num_cpts_global,
                              HYPRE_Int             num_functions,
                              HYPRE_Int            *dof_func,
                              HYPRE_Int             debug_flag,
                              HYPRE_Real            trunc_factor,
                              HYPRE_Int             num_smooth,
                              HYPRE_Real           *SmoothVecs,
                              hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   /* HYPRE_Real      *S_diag_data = hypre_CSRMatrixData(S_diag); */
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   /* HYPRE_Real      *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd); */
   HYPRE_Int        num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
   /* HYPRE_Int       *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   HYPRE_Int  *CF_marker_offd;
   HYPRE_Int  *dof_func_offd = NULL;

   hypre_CSRMatrix *S_ext;
   //HYPRE_Real      *S_ext_data;
   //HYPRE_Int       *S_ext_i;
   //HYPRE_BigInt    *S_ext_j;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int  *P_diag_i;
   HYPRE_Int  *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int  *P_offd_i;
   HYPRE_Int  *P_offd_j;

   HYPRE_Int   P_diag_size;
   HYPRE_Int   P_offd_size;

   HYPRE_Int  *P_marker;
   /* HYPRE_Int       *P_marker_offd; */

   HYPRE_Int   jj_counter,jj_counter_offd;
   HYPRE_Int  *jj_count, *jj_count_offd;
   /* HYPRE_Int              jj_begin_row,jj_begin_row_offd;
   HYPRE_Int              jj_end_row,jj_end_row_offd; */

   HYPRE_Int   start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int   n_fine = hypre_CSRMatrixNumRows(S_diag);

   HYPRE_Int  *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int  *coarse_counter;
   HYPRE_Int   coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int   num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int   i,i1;
   HYPRE_Int   j,jl,jj;
   HYPRE_Int   start;

   HYPRE_Real  one  = 1.0;

   HYPRE_Int   my_id;
   HYPRE_Int   num_procs;
   HYPRE_Int   num_threads;
   HYPRE_Int   num_sends;
   HYPRE_Int   index;
   HYPRE_Int   ns, ne, size, rest;
   HYPRE_Int  *int_buf_data;
   //HYPRE_BigInt *big_buf_data;

   HYPRE_Real  wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_S_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      /* same exchange for the function indices */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);

      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      S_ext      = hypre_ParCSRMatrixExtractBExt(S,S,1);
      //S_ext_i    = hypre_CSRMatrixI(S_ext);
      //S_ext_j    = hypre_CSRMatrixBigJ(S_ext);
      //S_ext_data = hypre_CSRMatrixData(S_ext);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get S_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/

/* RDF: this looks a little tricky, but doable */

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* each thread handles a contiguous slice [ns, ne) of the fine rows */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               /* removed */
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/

   /* prefix-sum the per-thread counts into global offsets */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);

   big_buf_data = hypre_CTAlloc(HYPRE_BigInt,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* shift each thread's local coarse numbering to a global-local one */
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++]
            = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
                                               fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();*/

   /*-----------------------------------------------------------------------
    *  Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* resume filling P at this thread's offset from the first pass */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            HYPRE_Int kk;
            HYPRE_Int indices[1000]; /* kludge */
            /* NOTE(review): fixed-size buffer -- a row with more than 1000
               strong C-neighbors would overflow it. */

            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  jj_counter++;

                  indices[kk] = i1;
                  kk++;
               }
            }
            /* least-squares fit of the smooth vectors gives the weights */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);

            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   P_diag_i[i] = jj_counter; /* check that this is in right place for threads */
   /* NOTE(review): 'i' is private in the OpenMP region above, so its value
      here is indeterminate under OpenMP; P_diag_i[n_fine] was already set
      after the first pass, which is what the matrix below relies on. */

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* build the local-to-global column map for the offd part of P */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_marker[i] = P_offd_j[i];

      hypre_qsort0(P_marker, 0, P_offd_size-1);

      /* unique the sorted column indices in place */
      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i=1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      for (i=0; i < num_cols_P_offd; i++)
         tmp_map_offd[i] = P_marker[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);

   return(0);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpGSMG
 *
 * Difference with hypre_BoomerAMGBuildInterp is that S contains values
 * and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *tmp_map_offd = NULL; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; HYPRE_Real *S_ext_data; HYPRE_Int *S_ext_i; HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; //HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_BigInt big_i2; 
HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int start; HYPRE_Int c_num; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[0]; total_global_cpts = 0; /* we will set this later for the matrix in the setup */ /* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/ /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); 
hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else { P_marker_offd[i1] = strong_f_marker; } } } jj_end_row_offd = jj_counter_offd; /* Loop over ith row of S. 
First, the diagonal part of S */ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += S_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of S for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) sum += S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) sum += S_offd_data[jj1]; } } if (sum != 0) { distribute = S_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of S for point i1 and do the distribution. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1]; } } } else { /* do nothing */ } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else { /* do nothing */ } } /*---------------------------------------------------------------- * Still looping over ith row of S. Next, loop over the * off-diagonal part of S *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += S_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*---------------------------------------------------------*/ /* find row number */ c_num = S_offd_j[jj]; for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) { /* in the diagonal block */ if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row) sum += S_ext_data[jj1]; } else { /* in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) sum += S_ext_data[jj1]; } } } if (sum != 0) { distribute = S_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */ { if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row) P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]] += distribute * S_ext_data[jj1]; } else { /* check to see if it is in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1]; } } } } else { /* do nothing */ } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else { /* do nothing */ } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ sum = 0.; for (jj = jj_begin_row; jj < jj_end_row; jj++) sum += P_diag_data[jj]; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) sum += P_offd_data[jj]; for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= sum; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= sum; } strong_f_marker--; P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size-1); num_cols_P_offd = 1; index = P_marker[0]; for (i=1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = 
hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return(0); }
convolution_1x1_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack 1x1 convolution weights for the pack4 sgemm kernel below.
// src layout: a flat inch-by-outch weight matrix.
// dst layout: 4b-4a-inch/4a-outch/4b, i.e. for each group of 4 output channels
// (one kernel_pack4 channel) and each group of 4 input channels, a 4x4 tile is
// stored so the compute kernel can read one weight tile as four vector loads.
// NOTE(review): the loops only process full groups of 4 — presumably inch and
// outch are multiples of 4 here (pack4 layout); confirm against callers.
static void conv1x1s1_sgemm_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
    // one row per inch/4 group, one channel per outch/4 group, 16 floats each
    kernel_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);

    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        // four consecutive output-channel rows of the flat weight matrix
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;

        float* g0 = kernel_pack4.channel(q / 4);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            // write a 4x4 tile: outermost index is the input channel (p..p+3),
            // innermost is the output channel (q..q+3)
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];

            g0[4] = k0[1];
            g0[5] = k1[1];
            g0[6] = k2[1];
            g0[7] = k3[1];

            g0[8] = k0[2];
            g0[9] = k1[2];
            g0[10] = k2[2];
            g0[11] = k3[2];

            g0[12] = k0[3];
            g0[13] = k1[3];
            g0[14] = k2[3];
            g0[15] = k3[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            g0 += 16;
        }
    }
}

// 1x1 stride-1 convolution on pack4 data, implemented as an sgemm:
// first re-tile the input image into pixel groups of 4/2/1, then for each
// group of 4 output channels accumulate weight-tile x pixel products with
// SSE fmadd.  kernel must already be repacked by the transform above.
// _bias may be empty; when null a zero bias is used.
static void conv1x1s1_sgemm_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int size = w * h; // pixel count (each pixel is a pack of 4 floats)

    const float* bias = _bias;

    // interleave
    // tmp gathers, for each tile of 4 (then 2, then 1) pixels, the pixel
    // values across ALL input channels contiguously, so the compute loop
    // streams through memory linearly.
    Mat tmp(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
    {
        int nn_size;
        int remain_size_start;

        // tiles of 4 pixels
        remain_size_start = 0;
        nn_size = (size - remain_size_start) >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4; // 4 floats per packed pixel

            float* tmpptr = tmp.channel(i / 4);

            for (int q = 0; q < inch; q++)
            {
                // copy 4 packed pixels of channel q
                __m128 _r0 = _mm_loadu_ps(img0);
                __m128 _r1 = _mm_loadu_ps(img0 + 4);
                __m128 _r2 = _mm_loadu_ps(img0 + 8);
                __m128 _r3 = _mm_loadu_ps(img0 + 12);
                _mm_storeu_ps(tmpptr, _r0);
                _mm_storeu_ps(tmpptr + 4, _r1);
                _mm_storeu_ps(tmpptr + 8, _r2);
                _mm_storeu_ps(tmpptr + 12, _r3);

                tmpptr += 16;
                img0 += bottom_blob.cstep * 4; // advance to next input channel
            }
        }

        // tiles of 2 pixels
        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            // channel index mirrors the tmp.create() row layout above
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                __m128 _r1 = _mm_loadu_ps(img0 + 4);
                _mm_storeu_ps(tmpptr, _r0);
                _mm_storeu_ps(tmpptr + 4, _r1);

                tmpptr += 8;
                img0 += bottom_blob.cstep * 4;
            }
        }

        // leftover single pixels
        remain_size_start += nn_size << 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                _mm_storeu_ps(tmpptr, _r0);

                tmpptr += 4;
                img0 += bottom_blob.cstep * 4;
            }
        }
    }

    // sgemm: one output channel pack (4 channels) per task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 4 : zeros; // 4 bias values per pack

        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            float* tmpptr = tmp.channel(i / 4);
            const float* kptr0 = (const float*)kernel.channel(p);

            // 4 pixel accumulators, each holding 4 output channels
            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _mm_loadu_ps(biasptr);
            __m128 _sum2 = _mm_loadu_ps(biasptr);
            __m128 _sum3 = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                // broadcast each of the 4 lanes of each of the 4 pixels
                __m128 _val00 = _mm_load1_ps(tmpptr);
                __m128 _val01 = _mm_load1_ps(tmpptr + 1);
                __m128 _val02 = _mm_load1_ps(tmpptr + 2);
                __m128 _val03 = _mm_load1_ps(tmpptr + 3);
                __m128 _val10 = _mm_load1_ps(tmpptr + 4);
                __m128 _val11 = _mm_load1_ps(tmpptr + 5);
                __m128 _val12 = _mm_load1_ps(tmpptr + 6);
                __m128 _val13 = _mm_load1_ps(tmpptr + 7);
                __m128 _val20 = _mm_load1_ps(tmpptr + 8);
                __m128 _val21 = _mm_load1_ps(tmpptr + 9);
                __m128 _val22 = _mm_load1_ps(tmpptr + 10);
                __m128 _val23 = _mm_load1_ps(tmpptr + 11);
                __m128 _val30 = _mm_load1_ps(tmpptr + 12);
                __m128 _val31 = _mm_load1_ps(tmpptr + 13);
                __m128 _val32 = _mm_load1_ps(tmpptr + 14);
                __m128 _val33 = _mm_load1_ps(tmpptr + 15);

                // 4x4 weight tile for (4 out channels) x (4 in channels)
                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

                _sum0 = _mm_comp_fmadd_ps(_w0, _val00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w1, _val01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w2, _val02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w3, _val03, _sum0);
                _sum1 = _mm_comp_fmadd_ps(_w0, _val10, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w1, _val11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w2, _val12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w3, _val13, _sum1);
                _sum2 = _mm_comp_fmadd_ps(_w0, _val20, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_w1, _val21, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_w2, _val22, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_w3, _val23, _sum2);
                _sum3 = _mm_comp_fmadd_ps(_w0, _val30, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_w1, _val31, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_w2, _val32, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_w3, _val33, _sum3);

                tmpptr += 16;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);
            _mm_store_ps(outptr0 + 8, _sum2);
            _mm_store_ps(outptr0 + 12, _sum3);

            outptr0 += 16;
        }
        for (; i + 1 < size; i += 2)
        {
            // same scheme, 2 pixels at a time
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val00 = _mm_load1_ps(tmpptr);
                __m128 _val01 = _mm_load1_ps(tmpptr + 1);
                __m128 _val02 = _mm_load1_ps(tmpptr + 2);
                __m128 _val03 = _mm_load1_ps(tmpptr + 3);
                __m128 _val10 = _mm_load1_ps(tmpptr + 4);
                __m128 _val11 = _mm_load1_ps(tmpptr + 5);
                __m128 _val12 = _mm_load1_ps(tmpptr + 6);
                __m128 _val13 = _mm_load1_ps(tmpptr + 7);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

                _sum0 = _mm_comp_fmadd_ps(_w0, _val00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w1, _val01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w2, _val02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_w3, _val03, _sum0);
                _sum1 = _mm_comp_fmadd_ps(_w0, _val10, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w1, _val11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w2, _val12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_w3, _val13, _sum1);

                tmpptr += 8;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);

            outptr0 += 8;
        }
        for (; i < size; i++)
        {
            // remaining single pixels
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val0 = _mm_load1_ps(tmpptr);
                __m128 _val1 = _mm_load1_ps(tmpptr + 1);
                __m128 _val2 = _mm_load1_ps(tmpptr + 2);
                __m128 _val3 = _mm_load1_ps(tmpptr + 3);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

                _sum = _mm_comp_fmadd_ps(_w0, _val0, _sum);
                _sum = _mm_comp_fmadd_ps(_w1, _val1, _sum);
                _sum = _mm_comp_fmadd_ps(_w2, _val2, _sum);
                _sum = _mm_comp_fmadd_ps(_w3, _val3, _sum);

                tmpptr += 4;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum);

            outptr0 += 4;
        }
    }

    // reference scalar implementation kept for documentation purposes
    //     // NOTE sgemm
    //     for (; p<outch; p++)
    //     {
    //         Mat out0 = top_blob.channel(p);
    //
    //         const float bias0 = bias ? bias[p] : 0.f;
    //
    //         float* outptr0 = out0;
    //
    //         for (int i=0; i<size; i++)
    //         {
    //             float sum = bias0;
    //
    //             const float* kptr = _kernel.channel(p);
    //
    //             for (int q=0; q<inch; q++)
    //             {
    //                 const float* img0 = bottom_blob.channel(q);
    //
    //                 sum += img0[i] * kptr[0];
    //                 kptr ++;
    //             }
    //
    //             outptr0[i] = sum;
    //         }
    //     }
}

// 1x1 stride-2 convolution on pack4 data: first shrink the input by taking
// every second packed pixel in both dimensions into a contiguous buffer,
// then run the stride-1 sgemm kernel on the shrunk image.
static void conv1x1s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // floats to skip at each row end: the unread tail of the current row plus
    // the following (skipped) row, times 4 floats per packed pixel
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one packed pixel, then skip the next (stride 2)
                __m128 _v = _mm_load_ps(r0);
                _mm_store_ps(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4_sse(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
serializededata.h
#ifndef SRC_BASIS_CONTAINERS_SERIALIZEDEDATA_H_ #define SRC_BASIS_CONTAINERS_SERIALIZEDEDATA_H_ #include "edata.h" #include "tarray.h" #include "basis/utilities/packing.h" #include <vector> namespace mesio { template <typename TEBoundaries, typename TEData> class serializededata { public: static void balance(size_t esize, std::vector<std::vector<TEData> > &data, const std::vector<size_t> *distribution = NULL) { // profiler::syncstart("balance_fixed"); std::vector<size_t> _distribution; if (distribution == NULL) { size_t size = 0; for (size_t t = 0; t < data.size(); t++) { size += data[t].size(); } _distribution = tarray<size_t>::distribute(data.size(), size); } else { _distribution = *distribution; } // profiler::syncparam("size", _distribution.back()); for (size_t t = 0, tt = 0; t < data.size(); tt = ++t) { while (++tt && data[t].size() < esize * (_distribution[t + 1] - _distribution[t])) { size_t diff = esize * (_distribution[t + 1] - _distribution[t]) - data[t].size(); if (diff < data[tt].size()) { data[t].insert(data[t].end(), data[tt].begin(), data[tt].begin() + diff); data[tt].erase(data[tt].begin(), data[tt].begin() + diff); } else { data[t].insert(data[t].end(), data[tt].begin(), data[tt].end()); data[tt].clear(); } } if (data[t].size() > esize * (_distribution[t + 1] - _distribution[t])) { size_t diff = data[t].size() - esize * (_distribution[t + 1] - _distribution[t]); data[t + 1].insert(data[t + 1].begin(), data[t].end() - diff, data[t].end()); data[t].erase(data[t].end() - diff, data[t].end()); } } // profiler::syncend("balance_fixed"); } static void balance(std::vector<std::vector<TEBoundaries> > &boundaries, std::vector<std::vector<TEData> > &data, const std::vector<size_t> *distribution = NULL) { // profiler::syncstart("balance_various"); size_t size = 0; std::vector<size_t> sizes(boundaries.size()); for (size_t t = 0; t < boundaries.size(); t++) { sizes[t] = boundaries[t].size(); size += boundaries[t].size(); } --sizes[0]; // 
profiler::syncparam("size", size); if (size == 0) { // profiler::syncend("balance_various"); return; } std::vector<size_t> _distribution; if (distribution == NULL) { _distribution = tarray<size_t>::distribute(data.size(), size - 1); } else { _distribution = *distribution; } for (size_t t = 0, tt = 0; t < boundaries.size(); tt = ++t) { while (++tt && sizes[t] < _distribution[t + 1] - _distribution[t]) { size_t diff = _distribution[t + 1] - _distribution[t] - sizes[t]; if (diff < sizes[tt]) { size_t ttt = 0; while (boundaries[tt - ++ttt].size() == 0); size_t ediff = *(boundaries[tt].begin() + diff - 1) - boundaries[tt - ttt].back(); data[t].insert(data[t].end(), data[tt].begin(), data[tt].begin() + ediff); data[tt].erase(data[tt].begin(), data[tt].begin() + ediff); boundaries[t].insert(boundaries[t].end(), boundaries[tt].begin(), boundaries[tt].begin() + diff); boundaries[tt].erase(boundaries[tt].begin(), boundaries[tt].begin() + diff); sizes[t] += diff; sizes[tt] -= diff; } else { sizes[t] += boundaries[tt].size(); sizes[tt] -= boundaries[tt].size(); data[t].insert(data[t].end(), data[tt].begin(), data[tt].end()); data[tt].clear(); boundaries[t].insert(boundaries[t].end(), boundaries[tt].begin(), boundaries[tt].end()); boundaries[tt].clear(); } } if (sizes[t] > _distribution[t + 1] - _distribution[t]) { size_t diff = sizes[t] - (_distribution[t + 1] - _distribution[t]); size_t ediff = boundaries[t].back() - *(boundaries[t].end() - diff - 1); data[t + 1].insert(data[t + 1].begin(), data[t].end() - ediff, data[t].end()); data[t].erase(data[t].end() - ediff, data[t].end()); boundaries[t + 1].insert(boundaries[t + 1].begin(), boundaries[t].end() - diff, boundaries[t].end()); boundaries[t].erase(boundaries[t].end() - diff, boundaries[t].end()); sizes[t] -= diff; sizes[t + 1] += diff; } } // profiler::syncend("balance_various"); } private: template<class TIterator, typename TIteratorEData> class iterator_base { public: // bool operator< (const TIterator &other) const { 
return _edata._begin < other._edata._begin; } // bool operator> (const TIterator &other) const { return _edata._begin > other._edata._begin; } // bool operator<=(const TIterator &other) const { return _edata._begin <= other._edata._begin; } // bool operator>=(const TIterator &other) const { return _edata._begin >= other._edata._begin; } bool operator==(const TIterator &other) const { return _element == other._element && _edata._begin == other._edata._begin; } bool operator!=(const TIterator &other) const { return _element != other._element || _edata._begin != other._edata._begin; } TIterator& operator++() { return move( 1); } TIterator& operator--() { return move(-1); } TIterator operator++(int) {TIterator tmp(*static_cast<TIterator*>(this)); operator++(); return tmp; } TIterator operator--(int) {TIterator tmp(*static_cast<TIterator*>(this)); operator--(); return tmp; } template <typename TType> TIterator operator+ (TType n) { return TIterator(*static_cast<TIterator*>(this)).move( n); } template <typename TType> TIterator operator- (TType n) { return TIterator(*static_cast<TIterator*>(this)).move(-n); } template <typename TType> TIterator& operator+=(TType n) { return move( n); } template <typename TType> TIterator& operator-=(TType n) { return move(-n); } protected: iterator_base(const TEBoundaries *begin, const TEBoundaries *element, const TEBoundaries *end, TIteratorEData *edata) : _element(begin), _end(end), _edata(edata, edata) { move(element - begin); } iterator_base(size_t edatasize, TIteratorEData *edata) : _element(NULL), _end(NULL), _edata(edata, edata) { _edata._end += edatasize; } template <typename TType> TIterator& move(TType n) { if (_element == NULL) { size_t size = _edata._end - _edata._begin; _edata._begin += n * size; _edata._end += n * size; } else { _edata._begin += *(_element + n) - *(_element); _element += n; if (_element != _end) { _edata._end = _edata._begin + *(_element + 1) - *_element; } else { _edata._end = _edata._begin; } } return 
static_cast<TIterator&>(*this); } const TEBoundaries* _element; const TEBoundaries* _end; edata<TIteratorEData> _edata; }; public: class iterator: public iterator_base<iterator, TEData> { friend class serializededata<TEBoundaries, TEData>; public: edata<TEData>& operator*() { return this->_edata; } edata<TEData>* operator->() { return &this->_edata; } private: iterator(TEBoundaries *begin, TEBoundaries *element, TEBoundaries *end, TEData *edata) : iterator_base<iterator, TEData>(begin, element, end, edata) { } iterator(size_t edatasize, TEData *edata) : iterator_base<iterator, TEData>(edatasize, edata) { } }; class const_iterator: public iterator_base<const_iterator, const TEData> { friend class serializededata<TEBoundaries, TEData>; public: edata<const TEData>& operator*() { return this->_edata; } edata<const TEData>* operator->() { return &this->_edata; } private: const_iterator(const TEBoundaries *begin, const TEBoundaries *element, const TEBoundaries *end, const TEData *edata) : iterator_base<const_iterator, const TEData>(begin, element, end, edata) { } const_iterator(size_t edatasize, TEData *edata) : iterator_base<const_iterator, const TEData>(edatasize, edata) { } }; // data are uniform serializededata(size_t edatasize, tarray<TEData> &&edata) : _eboundaries(0, 0), _edata(std::move(edata)), _edatasize(edatasize) { inititerators(edatasize); } serializededata(size_t edatasize, const std::vector<size_t> &distribution, TEData init = TEData{}) : _eboundaries(0, 0), _edata(distribution, edatasize, init), _edatasize(edatasize) { inititerators(edatasize); } // data are non-uniform serializededata(tarray<TEBoundaries> &&eboundaries, tarray<TEData> &&edata) : _eboundaries(std::move(eboundaries)), _edata(std::move(edata)), _edatasize(-1) { inititerators(); } serializededata(const serializededata<TEBoundaries, TEData> &other) : _eboundaries(other._eboundaries), _edata(other._edata), _edatasize(other._edatasize) { _edatasize != -1 ? 
inititerators(_edatasize) : inititerators(); } serializededata(serializededata<TEBoundaries, TEData> &&other) : _eboundaries(std::move(other._eboundaries)), _edata(std::move(other._edata)), _edatasize(std::move(other._edatasize)) { inititerators(); } serializededata<TEBoundaries, TEData>& operator=(const serializededata<TEBoundaries, TEData> &other) { if (this != &other) { _eboundaries = other._eboundaries; _edata = other._edata; _edatasize = other._edatasize; inititerators(); } return *this; } serializededata<TEBoundaries, TEData>& operator=(serializededata<TEBoundaries, TEData> &&other) { if (this != &other) { _eboundaries = std::move(other._eboundaries); _edata = std::move(other._edata); _edatasize = std::move(other._edatasize); inititerators(); } return *this; } size_t threads() const { return _edata.threads(); } size_t structures() const { if (_eboundaries.size()) { return _eboundaries.size() - 1; } else { return _edata.size() / _edatasize; } } size_t edataSize() const { return _edatasize; } void permute(const std::vector<esint> &permutation, const std::vector<size_t> &distribution) { if (_eboundaries.size()) { permuteNonUniformData(permutation, distribution); } else { permuteUniformData(permutation, distribution); } } iterator begin() { return _iterator.front(); } const_iterator begin() const { return _constiterator.front(); } const_iterator cbegin() const { return _constiterator.front(); } iterator end() { return _iterator.back(); } const_iterator end() const { return _constiterator.back(); } const_iterator cend() const { return _constiterator.back(); } iterator begin (size_t thread) { return _iterator[thread]; } const_iterator begin (size_t thread) const { return _constiterator[thread]; } const_iterator cbegin(size_t thread) const { return _constiterator[thread]; } iterator end (size_t thread) { return _iterator[thread + 1]; } const_iterator end (size_t thread) const { return _constiterator[thread + 1]; } const_iterator cend (size_t thread) const { return 
_constiterator[thread + 1]; } tarray<TEBoundaries>& boundarytarray() { return _eboundaries; } const tarray<TEBoundaries>& boundarytarray() const { return _eboundaries; } tarray<TEData>& datatarray() { return _edata; } const tarray<TEData>& datatarray() const { return _edata; } size_t packedSize() const { return sizeof(esint) + _eboundaries.packedSize() + _edata.packedSize(); } void pack(char* &p) const { memcpy(p, &_edatasize, sizeof(esint)); p += sizeof(esint); _eboundaries.pack(p); _edata.pack(p); } void unpack(const char* &p) { memcpy(&_edatasize, p, sizeof(esint)); p += sizeof(esint); _eboundaries.unpack(p); _edata.unpack(p); if (_edatasize == -1) { inititerators(); } else { inititerators(_edatasize); } } void updateDistribution() { if (_edatasize == -1) { inititerators(); } else { inititerators(_edatasize); } } private: void inititerators() { _iterator = std::vector<iterator>(threads() + 1, iterator(_eboundaries.begin(), _eboundaries.begin(), _eboundaries.end() - 1, _edata.begin())); _constiterator = std::vector<const_iterator>(threads() + 1, const_iterator(_eboundaries.begin(), _eboundaries.begin(), _eboundaries.end() - 1, _edata.begin())); for (size_t t = 1; t <= threads(); t++) { _iterator[t] += _eboundaries.distribution()[t] - 1; _constiterator[t] += _eboundaries.distribution()[t] - 1; } } void inititerators(size_t edatasize) { _iterator = std::vector<iterator>(threads() + 1, iterator(edatasize, _edata.begin())); _constiterator = std::vector<const_iterator>(threads() + 1, const_iterator(edatasize, _edata.begin())); for (size_t t = 1; t <= threads(); t++) { _iterator[t] += _edata.distribution()[t] / edatasize; _constiterator[t] += _edata.distribution()[t] / edatasize; } } void permuteUniformData(const std::vector<esint> &permutation, const std::vector<size_t> &distribution) { // profiler::syncstart("permute_uniform_data"); // profiler::syncparam("esize", _edatasize); // profiler::syncparam("elements", permutation.size()); std::vector<std::vector<TEData> > 
pdata(distribution.size() - 1); #pragma omp parallel for for (size_t t = 0; t < distribution.size() - 1; t++) { for (size_t i = distribution[t]; i < distribution[t + 1]; ++i) { pdata[t].insert(pdata[t].end(), _edata.data() + _edatasize * permutation[i], _edata.data() + _edatasize * (permutation[i] + 1)); } } _edata = tarray<TEData>(pdata); inititerators(_edatasize); // profiler::syncend("permute_uniform_data"); } void permuteNonUniformData(const std::vector<esint> &permutation, const std::vector<size_t> &providedDistribution) { // profiler::syncstart("permute_non_uniform_data"); // profiler::syncparam("elements", permutation.size()); // profiler::syncparam("datasize", _eboundaries.back()); std::vector<std::vector<TEBoundaries> > pboundaries(providedDistribution.size() - 1); std::vector<std::vector<TEData> > pdata(providedDistribution.size() - 1); std::vector<size_t> distribution = providedDistribution; if (_eboundaries.distribution().back() > distribution.back()) { for (size_t t = 1; t < providedDistribution.size(); t++) { distribution[t] += 1; } } pboundaries.front().push_back(0); #pragma omp parallel for for (size_t t = 0; t < providedDistribution.size() - 1; t++) { for (size_t e = (t == 0 ? 1 : distribution[t]); e < distribution[t + 1]; ++e) { pboundaries[t].push_back(_eboundaries.data()[permutation[e - 1] + 1] - _eboundaries.data()[permutation[e - 1]]); pdata[t].insert( pdata[t].end(), _edata.data() + _eboundaries.data()[permutation[e - 1]], _edata.data() + _eboundaries.data()[permutation[e - 1] + 1]); if (pboundaries[t].size() > 1) { pboundaries[t].back() += *(pboundaries[t].end() - 2); } } } std::vector<size_t> offsets; for (size_t t = 0; t < pboundaries.size(); t++) { offsets.push_back(pboundaries[t].size() ? 
pboundaries[t].back() : 0); } TEBoundaries sum = 0; for (size_t i = 0; i < offsets.size(); i++) { TEBoundaries tmp = offsets[i]; offsets[i] = sum; sum += tmp; } #pragma omp parallel for for (size_t t = 0; t < pboundaries.size(); t++) { size_t offset = offsets[t]; for (size_t i = 0; i < pboundaries[t].size(); i++) { pboundaries[t][i] += offset; } } _eboundaries = tarray<TEBoundaries>(pboundaries); _edata = tarray<TEData>(pdata); inititerators(); // profiler::syncend("permute_non_uniform_data"); } tarray<TEBoundaries> _eboundaries; tarray<TEData> _edata; esint _edatasize; std::vector<iterator> _iterator; std::vector<const_iterator> _constiterator; }; namespace utils { template <typename TEBoundaries, typename TEData> inline size_t packedSize(serializededata<TEBoundaries, TEData> *data) { if (data != NULL) { return 1 + data->packedSize(); } return 1; } template <typename TEBoundaries, typename TEData> inline void pack(serializededata<TEBoundaries, TEData> *data, char* &p) { pack(data != NULL, p); if (data != NULL) { data->pack(p); } } template <typename TEBoundaries, typename TEData> inline void unpack(serializededata<TEBoundaries, TEData> *&data, const char* &p) { if (data != NULL) { delete data; data = NULL; } bool notnull; unpack(notnull, p); if (notnull) { data = new serializededata<TEBoundaries, TEData>(tarray<TEBoundaries>(0, 0), tarray<TEData>(0, 0)); data->unpack(p); } } } // namespace utils } // namespace mesio #endif /* SRC_BASIS_CONTAINERS_SERIALIZEDEDATA_H_ */
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelFxImage() applies a channel expression to the specified image. The % expression consists of one or more channels, either mnemonic or numeric (e.g. % red, 1), separated by actions as follows: % % <=> exchange two channels (e.g. red<=>blue) % => copy one channel to another channel (e.g. red=>green) % = assign a constant value to a channel (e.g. red=50%) % , write new image channels in the specified order (e.g. red, green) % | add a new output image for the next set of channel operations % ; move to the next input image for the source of channel data % % For example, to create 3 grayscale images from the red, green, and blue % channels of an image, use: % % -channel-fx "red; green; blue" % % A channel without an operation symbol implies separate (i.e, semicolon). % % The format of the ChannelFxImage method is: % % Image *ChannelFxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A channel expression. 
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One action of a channel expression: extract (bare channel), assign a
  constant (=), exchange two channels (<=>), or transfer one channel to
  another (=>).
*/
typedef enum
{
  ExtractChannelOp,
  AssignChannelOp,
  ExchangeChannelOp,
  TransferChannelOp
} ChannelFx;

/*
  Apply one channel operation row-by-row: either store the constant `pixel`
  into destination_channel (AssignChannelOp) or copy source_channel from
  source_image into destination_channel of destination_image (all other ops).
  Returns MagickFalse when any row's pixels cannot be read or synced.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* clip to the overlap of the two images */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* separator between channel specs of the same output image */
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* advance to the next input image (wrap to the first at the end) */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        /* finish the current output image and start a new one */
        Image
          *canvas;

        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    /* operator tokens: "<" + "=" -> exchange, "=" -> assign, "=" + ">" ->
       transfer; a bare channel keeps ExtractChannelOp */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        /* for AssignChannelOp, i still holds the parsed source channel, so
           the constant is assigned to that same channel */
        destination_channel=(PixelChannel) i;
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case CompositeMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | CompositeMaskChannel);
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | ReadMaskChannel);
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | WriteMaskChannel);
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        if (((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* second half of the swap: copy the other direction */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        channel_mask=(ChannelType) (channel_mask |
          (1UL << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      /* a single extracted channel becomes a grayscale image */
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o m b i n e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CombineImages() combines one or more images into a single image.  The
%  grayscale value of the pixels of each image in the sequence is assigned in
%  order to the specified channels of the combined image.   The typical
%  ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
%  The format of the CombineImages method is:
%
%      Image *CombineImages(const Image *images,const ColorspaceType colorspace,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o colorspace: the image colorspace.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *CombineImages(const Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse) { combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (colorspace != UndefinedColorspace) (void) SetImageColorspace(combine_image,colorspace,exception); else if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace,exception); else (void) SetImageColorspace(combine_image,sRGBColorspace,exception); switch (combine_image->colorspace) { case UndefinedColorspace: case sRGBColorspace: { if (GetImageListLength(image) > 3) combine_image->alpha_trait=BlendPixelTrait; break; } case LinearGRAYColorspace: case GRAYColorspace: { if (GetImageListLength(image) > 1) combine_image->alpha_trait=BlendPixelTrait; break; } case CMYKColorspace: { if (GetImageListLength(image) > 4) combine_image->alpha_trait=BlendPixelTrait; break; } default: break; } /* Combine images. 
*/ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; Quantum *pixels; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t i; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } next=image; for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++) { register ssize_t x; PixelChannel channel = GetPixelChannelChannel(combine_image,i); PixelTrait traits = GetPixelChannelTraits(combine_image,channel); if (traits == UndefinedPixelTrait) continue; if (next == (Image *) NULL) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const Quantum *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { if (x < (ssize_t) next->columns) { q[i]=GetPixelGray(next,p); p+=GetPixelChannels(next); } q+=GetPixelChannels(combine_image); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CombineImageTag,progress, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Alpha is considered "active" whenever the trait is anything other than
     undefined (e.g. blend/copy/update). */
  return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImage() separates a channel from the image and returns it as a
%  grayscale image.
%
%  The format of the SeparateImage method is:
%
%      Image *SeparateImage(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the image channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
/* Extract bit `bit` from a channel-type bitmask. */
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* Result is a single-channel grayscale image with no alpha. */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Default to black; the last selected channel wins if the mask selects
         more than one channel. */
      SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImages() returns a separate grayscale image for each channel
%  specified.
%
%  The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  register ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  /* One grayscale image per updatable channel of the source image. */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    separate_image=SeparateImage(image,(ChannelType) (1UL << channel),
      exception);
    if (separate_image != (Image *) NULL)
      AppendImageToList(&images,separate_image);
  }
  /* Fall back to a single separation when no channel was selected. */
  if (images == (Image *) NULL)
    images=SeparateImage(image,UndefinedChannel,exception);
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a C h a n n e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
%  channel.
%
%  The format of the SetImageAlphaChannel method is:
%
%      MagickBooleanType SetImageAlphaChannel(Image *image,
%        const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha_type:  The alpha channel type: ActivateAlphaChannel,
%      AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
%      DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
%      OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
%      and TransparentAlphaChannel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Compose pixel info `p' (with opacity `alpha') over pixel `q' (with opacity
   `beta') and store the blended result in `composite'.  Implements the
   standard "over" operator on each color channel plus the combined alpha. */
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta,
  gamma=Sa*(-Da)+Sa+Da;
  /* Avoid division by zero when both pixels are fully transparent. */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}

MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha (premultiply each color channel by its alpha).
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            /* Never scale the alpha channel itself. */
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              /* Replace color but keep the pixel fully transparent. */
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* Derive alpha from the image intensity. */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha (undo premultiplication of color by alpha).
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          Sa=QuantumScale*GetPixelAlpha(image,q);
          /* Reciprocal guards against divide-by-zero for transparent pixels. */
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency by compositing over the background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double)
            GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      PixelInfo
        background;

      /*
        Remove transparency: shape the image with the background color,
        using the pixel intensity as the new alpha.
      */
      ConformPixelInfo(image,&image->background_color,&background,exception);
      background.alpha_trait=BlendPixelTrait;
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=background;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
GB_unop__identity_uint32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint32_fc32
// op(A') function:  GB_unop_tran__identity_uint32_fc32

// C type:   uint32_t
// A type:   GxB_FC32_t
// cast:     uint32_t cij = GB_cast_to_uint32_t ((double) crealf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: complex->real takes the real part, then converts to uint32
#define GB_CAST(z, aij) \
    uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint32_fc32
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; it uses the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_winograd_transform_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6x6,3x3) input transform for pack-4 bf16 storage: converts each
// 8x8 input tile to the transform domain (B^T d B), writing float results to
// bottom_blob_tm.  Input elements are bf16 (stored as unsigned short).
static void conv3x3s1_winograd64_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // scratch tile: [row][col][pack4 lane], holds the row-pass result
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;

                // pass 1: transform the 8 rows of the tile into tmp
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
                    float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
                    float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                // pass 2: transform the 8 columns of tmp and scatter to img0_tm
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) output transform for pack-4 bf16 storage: converts each
// 8x8 transform-domain tile back to a 6x6 output tile (A^T m A), adds the
// per-channel bias and stores the result as bf16.
static void conv3x3s1_winograd64_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f,  1.0f,   1.0f,   1.0f,   1.0f,  32.0f, 32.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   2.0f,  -2.0f,  16.0f,-16.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,   4.0f,   4.0f,   8.0f,  8.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   8.0f,  -8.0f,   4.0f, -4.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,  16.0f,  16.0f,   2.0f,  2.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,  32.0f, -32.0f,   1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias is optional; substitute zero when absent
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // scratch tile holding the row-pass result
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4;

                // pass 1: gather the 8 transform rows and reduce them to 6
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // pass 2: reduce columns, add bias, convert back to bf16
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
                    vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) input transform for pack-4 bf16 storage: converts each
// 6x6 input tile to the transform domain.
static void conv3x3s1_winograd42_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // scratch tile for the row pass
        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4;

                // pass 1: transform rows
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                // pass 2: transform columns and scatter to img0_tm
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) output transform for pack-4 bf16 storage: converts each
// 6x6 transform-domain tile back to a 4x4 output tile, adds the per-channel
// bias and stores the result as bf16.
static void conv3x3s1_winograd42_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias is optional; substitute zero when absent
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // scratch tile for the row pass
        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4;

                // pass 1: gather the 6 transform rows and reduce them to 4
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // pass 2: reduce columns, add bias, convert back to bf16
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));

                    output0 += outw * 4;
                }
            }
        }
    }
}
10_omp_empty.c
// clang-format off // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s // REQUIRES: openmp // clang-format on #include "omp.h" // CHECK-NOT: {{.*}} __typeart_alloc void foo(int* x) { #pragma omp parallel // transformed to @__kmpc_fork_call { *x = -1; } #pragma omp parallel for for (int i = 0; i < x[10]; ++i) { x[i] = i; } } // Standard filter // CHECK: > Stack Memory // CHECK-NEXT: Alloca : // CHECK-NEXT: Stack call filtered % : 100.00
DataGen.h
// Copyright (C) 2019-2020 Zilliz. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under the License #pragma once #include "common/Schema.h" #include <random> #include <memory> #include <cstring> #include "segcore/SegmentGrowing.h" #include "segcore/SegmentSealed.h" #include "Constants.h" #include <boost/algorithm/string/predicate.hpp> #include "segcore/SegmentSealed.h" #include <knowhere/index/vector_index/VecIndex.h> #include <knowhere/index/vector_index/adapter/VectorAdapter.h> #include <knowhere/index/vector_index/VecIndexFactory.h> #include <knowhere/index/vector_index/IndexIVF.h> #include <query/SearchOnIndex.h> using boost::algorithm::starts_with; namespace milvus::segcore { struct GeneratedData { std::vector<char> rows_; std::vector<aligned_vector<uint8_t>> cols_; std::vector<idx_t> row_ids_; std::vector<Timestamp> timestamps_; RowBasedRawData raw_; template <typename T> auto get_col(int index) const { auto& target = cols_.at(index); std::vector<T> ret(target.size() / sizeof(T)); memcpy(ret.data(), target.data(), target.size()); return ret; } template <typename T> auto get_mutable_col(int index) { auto& target = cols_.at(index); assert(target.size() == row_ids_.size() * sizeof(T)); auto ptr = reinterpret_cast<T*>(target.data()); return ptr; } private: GeneratedData() = default; friend GeneratedData DataGen(SchemaPtr schema, int64_t N, uint64_t seed); void generate_rows(int64_t N, SchemaPtr schema); }; inline void GeneratedData::generate_rows(int64_t N, SchemaPtr 
schema) { std::vector<int> offset_infos(schema->size() + 1, 0); auto sizeof_infos = schema->get_sizeof_infos(); std::partial_sum(sizeof_infos.begin(), sizeof_infos.end(), offset_infos.begin() + 1); int64_t len_per_row = offset_infos.back(); assert(len_per_row == schema->get_total_sizeof()); std::vector<char> result(len_per_row * N); for (int index = 0; index < N; ++index) { for (int fid = 0; fid < schema->size(); ++fid) { auto len = sizeof_infos[fid]; auto offset = offset_infos[fid]; auto src = cols_[fid].data() + index * len; auto dst = result.data() + offset + index * len_per_row; memcpy(dst, src, len); } } rows_ = std::move(result); raw_.raw_data = rows_.data(); raw_.sizeof_per_row = schema->get_total_sizeof(); raw_.count = N; } inline GeneratedData DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42) { using std::vector; std::vector<aligned_vector<uint8_t>> cols; std::default_random_engine er(seed); std::normal_distribution<> distr(0, 1); int offset = 0; auto insert_cols = [&cols](auto& data) { using T = std::remove_reference_t<decltype(data)>; auto len = sizeof(typename T::value_type) * data.size(); auto ptr = aligned_vector<uint8_t>(len); memcpy(ptr.data(), data.data(), len); cols.emplace_back(std::move(ptr)); }; for (auto& field : schema->get_fields()) { switch (field.get_data_type()) { case engine::DataType::VECTOR_FLOAT: { auto dim = field.get_dim(); vector<float> final(dim * N); bool is_ip = starts_with(field.get_name().get(), "normalized"); #pragma omp parallel for for (int n = 0; n < N; ++n) { vector<float> data(dim); float sum = 0; std::default_random_engine er2(seed + n); std::normal_distribution<> distr2(0, 1); for (auto& x : data) { x = distr2(er2) + offset; sum += x * x; } if (is_ip) { sum = sqrt(sum); for (auto& x : data) { x /= sum; } } std::copy(data.begin(), data.end(), final.begin() + dim * n); } insert_cols(final); break; } case engine::DataType::VECTOR_BINARY: { auto dim = field.get_dim(); Assert(dim % 8 == 0); vector<uint8_t> data(dim / 
8 * N); for (auto& x : data) { x = er(); } insert_cols(data); break; } case engine::DataType::INT64: { vector<int64_t> data(N); // begin with counter if (starts_with(field.get_name().get(), "counter")) { int64_t index = 0; for (auto& x : data) { x = index++; } } else { for (auto& x : data) { x = er() % (2 * N); } } insert_cols(data); break; } case engine::DataType::INT32: { vector<int> data(N); for (auto& x : data) { x = er() % (2 * N); } insert_cols(data); break; } case engine::DataType::FLOAT: { vector<float> data(N); for (auto& x : data) { x = distr(er); } insert_cols(data); break; } case engine::DataType::DOUBLE: { vector<double> data(N); for (auto& x : data) { x = distr(er); } insert_cols(data); break; } default: { throw std::runtime_error("unimplemented"); } } ++offset; } GeneratedData res; res.cols_ = std::move(cols); for (int i = 0; i < N; ++i) { res.row_ids_.push_back(i); res.timestamps_.push_back(i); } std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er); res.generate_rows(N, schema); return std::move(res); } inline auto CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) { namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::FloatVector); std::normal_distribution<double> dis(0, 1); std::default_random_engine e(seed); for (int i = 0; i < num_queries; ++i) { std::vector<float> vec; for (int d = 0; d < dim; ++d) { vec.push_back(dis(e)); } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size() * sizeof(float)); } return raw_group; } inline auto CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) { namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::FloatVector); int64_t src_index = 0; for (int 
i = 0; i < num_queries; ++i) { std::vector<float> vec; for (int d = 0; d < dim; ++d) { vec.push_back(src[src_index++]); } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size() * sizeof(float)); } return raw_group; } inline auto CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) { assert(dim % 8 == 0); namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::BinaryVector); std::default_random_engine e(seed); for (int i = 0; i < num_queries; ++i) { std::vector<uint8_t> vec; for (int d = 0; d < dim / 8; ++d) { vec.push_back(e()); } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size()); } return raw_group; } inline auto CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) { assert(dim % 8 == 0); namespace ser = milvus::proto::milvus; ser::PlaceholderGroup raw_group; auto value = raw_group.add_placeholders(); value->set_tag("$0"); value->set_type(ser::PlaceholderType::BinaryVector); for (int i = 0; i < num_queries; ++i) { std::vector<uint8_t> vec; for (int d = 0; d < dim / 8; ++d) { vec.push_back(*ptr); ++ptr; } // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float)); value->add_values(vec.data(), vec.size()); } return raw_group; } inline json SearchResultToJson(const SearchResult& sr) { int64_t num_queries = sr.num_queries_; int64_t topk = sr.topk_; std::vector<std::vector<std::string>> results; for (int q = 0; q < num_queries; ++q) { std::vector<std::string> result; for (int k = 0; k < topk; ++k) { int index = q * topk + k; result.emplace_back(std::to_string(sr.internal_seg_offsets_[index]) + "->" + std::to_string(sr.result_distances_[index])); } results.emplace_back(std::move(result)); } return 
json{results}; }; inline void SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) { // TODO auto row_count = dataset.row_ids_.size(); { LoadFieldDataInfo info; info.blob = dataset.row_ids_.data(); info.row_count = dataset.row_ids_.size(); info.field_id = 0; // field id for RowId seg.LoadFieldData(info); } { LoadFieldDataInfo info; info.blob = dataset.timestamps_.data(); info.row_count = dataset.timestamps_.size(); info.field_id = 1; seg.LoadFieldData(info); } int field_offset = 0; for (auto& meta : seg.get_schema().get_fields()) { LoadFieldDataInfo info; info.field_id = meta.get_id().get(); info.row_count = row_count; info.blob = dataset.cols_[field_offset].data(); seg.LoadFieldData(info); ++field_offset; } } inline std::unique_ptr<SegmentSealed> SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) { auto segment = CreateSealedSegment(schema); SealedLoader(dataset, *segment); segment->LoadIndex(index_info); return segment; } inline knowhere::VecIndexPtr GenIndexing(int64_t N, int64_t dim, const float* vec) { // {knowhere::IndexParams::nprobe, 10}, auto conf = knowhere::Config{{knowhere::meta::DIM, dim}, {knowhere::IndexParams::nlist, 1024}, {knowhere::Metric::TYPE, milvus::knowhere::Metric::L2}, {knowhere::meta::DEVICEID, 0}}; auto database = knowhere::GenDataset(N, dim, vec); auto indexing = std::make_shared<knowhere::IVF>(); indexing->Train(database, conf); indexing->AddWithoutIds(database, conf); return indexing; } } // namespace milvus::segcore
configurator.c
/* Simple tool to create config.h. * Would be much easier with ccan modules, but deliberately standalone. * * Copyright 2011 Rusty Russell <rusty@rustcorp.com.au>. MIT license. * * c12r_err, c12r_errx functions copied from ccan/err/err.c * Copyright Rusty Russell <rusty@rustcorp.com.au>. CC0 (Public domain) License. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #define _POSIX_C_SOURCE 200809L /* For pclose, popen, strdup */ #define EXIT_BAD_USAGE 1 #define EXIT_TROUBLE_RUNNING 2 #define EXIT_BAD_TEST 3 #define EXIT_BAD_INPUT 4 #include <errno.h> #include <stdio.h> #include <stdarg.h> #include <stdbool.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #ifdef _MSC_VER #define popen _popen #define pclose _pclose #endif #ifdef _MSC_VER #define DEFAULT_COMPILER "cl" /* Note: Dash options avoid POSIX path conversion when used under msys bash * and are therefore preferred to slash (e.g. 
-nologo over /nologo) * Note: Disable Warning 4200 "nonstandard extension used : zero-sized array * in struct/union" for flexible array members. */ #define DEFAULT_FLAGS "-nologo -Zi -W4 -wd4200 " \ "-D_CRT_NONSTDC_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS" #define DEFAULT_OUTPUT_EXE_FLAG "-Fe:" #else #define DEFAULT_COMPILER "cc" #define DEFAULT_FLAGS "-g3 -ggdb -Wall -Wundef -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wold-style-definition" #define DEFAULT_OUTPUT_EXE_FLAG "-o" #endif #define OUTPUT_FILE "configurator.out" #define INPUT_FILE "configuratortest.c" #ifdef _WIN32 #define DIR_SEP "\\" #else #define DIR_SEP "/" #endif static const char *progname = ""; static int verbose; static bool like_a_libtool = false; struct test { const char *name; const char *desc; /* * Template style flags (pick one): * OUTSIDE_MAIN: * - put a simple boilerplate main below it. * DEFINES_FUNC: * - defines a static function called func; adds ref to avoid warnings * INSIDE_MAIN: * - put this inside main(). * DEFINES_EVERYTHING: * - don't add any boilerplate at all. * * Execution flags: * EXECUTE: * - a runtime test; must compile, exit 0 means flag is set. * MAY_NOT_COMPILE: * - Only useful with EXECUTE: don't get upset if it doesn't compile. * <nothing>: * - a compile test, if it compiles must run and exit 0. 
*/ const char *style; const char *depends; const char *link; const char *fragment; const char *flags; const char *overrides; /* On success, force this to '1' */ bool done; bool answer; }; /* Terminated by a NULL name */ static struct test *tests; static const struct test base_tests[] = { { "HAVE_UNALIGNED_ACCESS", "unaligned access to int", "DEFINES_EVERYTHING|EXECUTE", NULL, NULL, "#include <string.h>\n" "int main(int argc, char *argv[]) {\n" " (void)argc;\n" " char pad[sizeof(int *) * 1];\n" " memcpy(pad, argv[0], sizeof(pad));\n" " int *x = (int *)pad, *y = (int *)(pad + 1);\n" " return *x == *y;\n" "}\n" }, { "HAVE_TYPEOF", "__typeof__ support", "INSIDE_MAIN", NULL, NULL, "__typeof__(argc) i; i = argc; return i == argc ? 0 : 1;" }, { "HAVE_BIG_ENDIAN", "big endian", "INSIDE_MAIN|EXECUTE", NULL, NULL, "union { int i; char c[sizeof(int)]; } u;\n" "u.i = 0x01020304;\n" "return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;" }, { "HAVE_BYTESWAP_H", "<byteswap.h>", "OUTSIDE_MAIN", NULL, NULL, "#include <byteswap.h>\n" }, { "HAVE_BSWAP_64", "bswap64 in byteswap.h", "DEFINES_FUNC", "HAVE_BYTESWAP_H", NULL, "#include <byteswap.h>\n" "static int func(int x) { return bswap_64(x); }" }, { "HAVE_LITTLE_ENDIAN", "little endian", "INSIDE_MAIN|EXECUTE", NULL, NULL, "union { int i; char c[sizeof(int)]; } u;\n" "u.i = 0x01020304;\n" "return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;" }, /* { "HAVE_32BIT_OFF_T", "off_t is 32 bits", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL, "#include <sys/types.h>\n" "int main(void) {\n" " return sizeof(off_t) == 4 ? 0 : 1;\n" "}\n" }, { "HAVE_ALIGNOF", "__alignof__ support", "INSIDE_MAIN", NULL, NULL, "return __alignof__(double) > 0 ? 
0 : 1;" }, { "HAVE_ASPRINTF", "asprintf() declaration", "DEFINES_FUNC", NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <stdio.h>\n" "static char *func(int x) {" " char *p;\n" " if (asprintf(&p, \"%u\", x) == -1) \n" " p = NULL;\n" " return p;\n" "}" }, { "HAVE_ATTRIBUTE_COLD", "__attribute__((cold)) support", "DEFINES_FUNC", NULL, NULL, "static int __attribute__((cold)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_CONST", "__attribute__((const)) support", "DEFINES_FUNC", NULL, NULL, "static int __attribute__((const)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_DEPRECATED", "__attribute__((deprecated)) support", "DEFINES_FUNC", NULL, NULL, "static int __attribute__((deprecated)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_NONNULL", "__attribute__((nonnull)) support", "DEFINES_FUNC", NULL, NULL, "static char *__attribute__((nonnull)) func(char *p) { return p; }" }, { "HAVE_ATTRIBUTE_RETURNS_NONNULL", "__attribute__((returns_nonnull)) support", "DEFINES_FUNC", NULL, NULL, "static const char *__attribute__((returns_nonnull)) func(void) { return \"hi\"; }" }, { "HAVE_ATTRIBUTE_SENTINEL", "__attribute__((sentinel)) support", "DEFINES_FUNC", NULL, NULL, "static int __attribute__((sentinel)) func(int i, ...) { return i; }" }, { "HAVE_ATTRIBUTE_PURE", "__attribute__((pure)) support", "DEFINES_FUNC", NULL, NULL, "static int __attribute__((pure)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_MAY_ALIAS", "__attribute__((may_alias)) support", "OUTSIDE_MAIN", NULL, NULL, "typedef short __attribute__((__may_alias__)) short_a;" }, { "HAVE_ATTRIBUTE_NORETURN", "__attribute__((noreturn)) support", "DEFINES_FUNC", NULL, NULL, "#include <stdlib.h>\n" "static void __attribute__((noreturn)) func(int x) { exit(x); }" }, { "HAVE_ATTRIBUTE_PRINTF", "__attribute__ format printf support", "DEFINES_FUNC", NULL, NULL, "static void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) 
{ (void)fmt; }" }, { "HAVE_ATTRIBUTE_UNUSED", "__attribute__((unused)) support", "OUTSIDE_MAIN", NULL, NULL, "static int __attribute__((unused)) func(int x) { return x; }" }, { "HAVE_ATTRIBUTE_USED", "__attribute__((used)) support", "OUTSIDE_MAIN", NULL, NULL, "static int __attribute__((used)) func(int x) { return x; }" }, { "HAVE_BACKTRACE", "backtrace() in <execinfo.h>", "DEFINES_FUNC", NULL, NULL, "#include <execinfo.h>\n" "static int func(int x) {" " void *bt[10];\n" " return backtrace(bt, 10) < x;\n" "}" }, { "HAVE_BUILTIN_CHOOSE_EXPR", "__builtin_choose_expr support", "INSIDE_MAIN", NULL, NULL, "return __builtin_choose_expr(1, 0, \"garbage\");" }, { "HAVE_BUILTIN_CLZ", "__builtin_clz support", "INSIDE_MAIN", NULL, NULL, "return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CLZL", "__builtin_clzl support", "INSIDE_MAIN", NULL, NULL, "return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CLZLL", "__builtin_clzll support", "INSIDE_MAIN", NULL, NULL, "return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CTZ", "__builtin_ctz support", "INSIDE_MAIN", NULL, NULL, "return __builtin_ctz(1 << (sizeof(int)*8 - 1)) == (sizeof(int)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CTZL", "__builtin_ctzl support", "INSIDE_MAIN", NULL, NULL, "return __builtin_ctzl(1UL << (sizeof(long)*8 - 1)) == (sizeof(long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CTZLL", "__builtin_ctzll support", "INSIDE_MAIN", NULL, NULL, "return __builtin_ctzll(1ULL << (sizeof(long long)*8 - 1)) == (sizeof(long long)*8 - 1) ? 0 : 1;" }, { "HAVE_BUILTIN_CONSTANT_P", "__builtin_constant_p support", "INSIDE_MAIN", NULL, NULL, "return __builtin_constant_p(1) ? 0 : 1;" }, { "HAVE_BUILTIN_EXPECT", "__builtin_expect support", "INSIDE_MAIN", NULL, NULL, "return __builtin_expect(argc == 1, 1) ? 0 : 1;" }, { "HAVE_BUILTIN_FFS", "__builtin_ffs support", "INSIDE_MAIN", NULL, NULL, "return __builtin_ffs(0) == 0 ? 
0 : 1;" }, { "HAVE_BUILTIN_FFSL", "__builtin_ffsl support", "INSIDE_MAIN", NULL, NULL, "return __builtin_ffsl(0L) == 0 ? 0 : 1;" }, { "HAVE_BUILTIN_FFSLL", "__builtin_ffsll support", "INSIDE_MAIN", NULL, NULL, "return __builtin_ffsll(0LL) == 0 ? 0 : 1;" }, { "HAVE_BUILTIN_POPCOUNT", "__builtin_popcount support", "INSIDE_MAIN", NULL, NULL, "return __builtin_popcount(255) == 8 ? 0 : 1;" }, { "HAVE_BUILTIN_POPCOUNTL", "__builtin_popcountl support", "INSIDE_MAIN", NULL, NULL, "return __builtin_popcountl(255L) == 8 ? 0 : 1;" }, { "HAVE_BUILTIN_POPCOUNTLL", "__builtin_popcountll support", "INSIDE_MAIN", NULL, NULL, "return __builtin_popcountll(255LL) == 8 ? 0 : 1;" }, { "HAVE_BUILTIN_TYPES_COMPATIBLE_P", "__builtin_types_compatible_p support", "INSIDE_MAIN", NULL, NULL, "return __builtin_types_compatible_p(char *, int) ? 1 : 0;" }, { "HAVE_ICCARM_INTRINSICS", "<intrinsics.h>", "DEFINES_FUNC", NULL, NULL, "#include <intrinsics.h>\n" "int func(int v) {\n" " return __CLZ(__RBIT(v));\n" "}" }, { "HAVE_CLOCK_GETTIME", "clock_gettime() declaration", "DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC", NULL, "#include <time.h>\n" "static struct timespec func(void) {\n" " struct timespec ts;\n" " clock_gettime(CLOCK_REALTIME, &ts);\n" " return ts;\n" "}\n" }, { "HAVE_CLOCK_GETTIME_IN_LIBRT", "clock_gettime() in librt", "DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC !HAVE_CLOCK_GETTIME", "-lrt", "#include <time.h>\n" "static struct timespec func(void) {\n" " struct timespec ts;\n" " clock_gettime(CLOCK_REALTIME, &ts);\n" " return ts;\n" "}\n", "HAVE_CLOCK_GETTIME" }, { "HAVE_COMPOUND_LITERALS", "compound literal support", "INSIDE_MAIN", NULL, NULL, "int *foo = (int[]) { 1, 2, 3, 4 };\n" "return foo[0] ? 0 : 1;" }, { "HAVE_FCHDIR", "fchdir support", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL, "#include <sys/types.h>\n" "#include <sys/stat.h>\n" "#include <fcntl.h>\n" "#include <unistd.h>\n" "int main(void) {\n" " int fd = open(\"..\", O_RDONLY);\n" " return fchdir(fd) == 0 ? 
0 : 1;\n" "}\n" }, { "HAVE_ERR_H", "<err.h>", "DEFINES_FUNC", NULL, NULL, "#include <err.h>\n" "static void func(int arg) {\n" " if (arg == 0)\n" " err(1, \"err %u\", arg);\n" " if (arg == 1)\n" " errx(1, \"err %u\", arg);\n" " if (arg == 3)\n" " warn(\"warn %u\", arg);\n" " if (arg == 4)\n" " warnx(\"warn %u\", arg);\n" "}\n" }, { "HAVE_FILE_OFFSET_BITS", "_FILE_OFFSET_BITS to get 64-bit offsets", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", "HAVE_32BIT_OFF_T", NULL, "#define _FILE_OFFSET_BITS 64\n" "#include <sys/types.h>\n" "int main(void) {\n" " return sizeof(off_t) == 8 ? 0 : 1;\n" "}\n" }, { "HAVE_FOR_LOOP_DECLARATION", "for loop declaration support", "INSIDE_MAIN", NULL, NULL, "int ret = 1;\n" "for (int i = 0; i < argc; i++) { ret = 0; };\n" "return ret;" }, { "HAVE_FLEXIBLE_ARRAY_MEMBER", "flexible array member support", "OUTSIDE_MAIN", NULL, NULL, "struct foo { unsigned int x; int arr[]; };" }, { "HAVE_GETPAGESIZE", "getpagesize() in <unistd.h>", "DEFINES_FUNC", NULL, NULL, "#include <unistd.h>\n" "static int func(void) { return getpagesize(); }" }, { "HAVE_ISBLANK", "isblank() in <ctype.h>", "DEFINES_FUNC", NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <ctype.h>\n" "static int func(void) { return isblank(' '); }" }, { "HAVE_MEMMEM", "memmem in <string.h>", "DEFINES_FUNC", NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <string.h>\n" "static void *func(void *h, size_t hl, void *n, size_t nl) {\n" "return memmem(h, hl, n, nl);" "}\n", }, { "HAVE_MEMRCHR", "memrchr in <string.h>", "DEFINES_FUNC", NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <string.h>\n" "static void *func(void *s, int c, size_t n) {\n" "return memrchr(s, c, n);" "}\n", }, { "HAVE_MMAP", "mmap() declaration", "DEFINES_FUNC", NULL, NULL, "#include <sys/mman.h>\n" "static void *func(int fd) {\n" " return mmap(0, 65536, PROT_READ, MAP_SHARED, fd, 0);\n" "}" }, { "HAVE_PROC_SELF_MAPS", 
"/proc/self/maps exists", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL, "#include <sys/types.h>\n" "#include <sys/stat.h>\n" "#include <fcntl.h>\n" "int main(void) {\n" " return open(\"/proc/self/maps\", O_RDONLY) != -1 ? 0 : 1;\n" "}\n" }, { "HAVE_QSORT_R_PRIVATE_LAST", "qsort_r cmp takes trailing arg", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL, "#ifndef _GNU_SOURCE\n" "#define _GNU_SOURCE\n" "#endif\n" "#include <stdlib.h>\n" "static int cmp(const void *lp, const void *rp, void *priv) {\n" " *(unsigned int *)priv = 1;\n" " return *(const int *)lp - *(const int *)rp; }\n" "int main(void) {\n" " int array[] = { 9, 2, 5 };\n" " unsigned int called = 0;\n" " qsort_r(array, 3, sizeof(int), cmp, &called);\n" " return called && array[0] == 2 && array[1] == 5 && array[2] == 9 ? 0 : 1;\n" "}\n" }, { "HAVE_STRUCT_TIMESPEC", "struct timespec declaration", "DEFINES_FUNC", NULL, NULL, "#include <time.h>\n" "static void func(void) {\n" " struct timespec ts;\n" " ts.tv_sec = ts.tv_nsec = 1;\n" "}\n" }, { "HAVE_SECTION_START_STOP", "__attribute__((section)) and __start/__stop", "DEFINES_FUNC", NULL, NULL, "static void *__attribute__((__section__(\"mysec\"))) p = &p;\n" "static int func(void) {\n" " extern void *__start_mysec[], *__stop_mysec[];\n" " return __stop_mysec - __start_mysec;\n" "}\n" }, { "HAVE_STACK_GROWS_UPWARDS", "stack grows upwards", "DEFINES_EVERYTHING|EXECUTE", NULL, NULL, "#include <stddef.h>\n" "static ptrdiff_t nest(const void *base, unsigned int i)\n" "{\n" " if (i == 0)\n" " return (const char *)&i - (const char *)base;\n" " return nest(base, i-1);\n" "}\n" "int main(int argc, char *argv[]) {\n" " (void)argv;\n" " return (nest(&argc, argc) > 0) ? 0 : 1;\n" "}\n" }, { "HAVE_STATEMENT_EXPR", "statement expression support", "INSIDE_MAIN", NULL, NULL, "return ({ int x = argc; x == argc ? 
0 : 1; });" }, { "HAVE_SYS_FILIO_H", "<sys/filio.h>", "OUTSIDE_MAIN", NULL, NULL, "#include <sys/filio.h>\n" }, { "HAVE_SYS_TERMIOS_H", "<sys/termios.h>", "OUTSIDE_MAIN", NULL, NULL, "#include <sys/termios.h>\n" }, { "HAVE_SYS_UNISTD_H", "<sys/unistd.h>", "OUTSIDE_MAIN", NULL, NULL, "#include <sys/unistd.h>\n" }, { "HAVE_UTIME", "utime() declaration", "DEFINES_FUNC", NULL, NULL, "#include <sys/types.h>\n" "#include <utime.h>\n" "static int func(const char *filename) {\n" " struct utimbuf times = { 0 };\n" " return utime(filename, &times);\n" "}" }, { "HAVE_WARN_UNUSED_RESULT", "__attribute__((warn_unused_result))", "DEFINES_FUNC", NULL, NULL, "#include <sys/types.h>\n" "#include <utime.h>\n" "static __attribute__((warn_unused_result)) int func(int i) {\n" " return i + 1;\n" "}" }, { "HAVE_OPENMP", "#pragma omp and -fopenmp support", "INSIDE_MAIN|EXECUTE|MAY_NOT_COMPILE", NULL, NULL, "int i;\n" "#pragma omp parallel for\n" "for(i = 0; i < 0; i++) {};\n" "return 0;\n", "-Werror -fopenmp" }, { "HAVE_VALGRIND_MEMCHECK_H", "<valgrind/memcheck.h>", "OUTSIDE_MAIN", NULL, NULL, "#include <valgrind/memcheck.h>\n" }, { "HAVE_UCONTEXT", "working <ucontext.h", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL, "#include <ucontext.h>\n" "static int x = 0;\n" "static char stack[2048];\n" "static ucontext_t a, b;\n" "static void fn(void) {\n" " x |= 2;\n" " setcontext(&b);\n" " x |= 4;\n" "}\n" "int main(void) {\n" " x |= 1;\n" " getcontext(&a);\n" " a.uc_stack.ss_sp = stack;\n" " a.uc_stack.ss_size = sizeof(stack);\n" " makecontext(&a, fn, 0);\n" " swapcontext(&b, &a);\n" " return (x == 3) ? 
0 : 1;\n" "}\n" }, { "HAVE_POINTER_SAFE_MAKECONTEXT", "passing pointers via makecontext()", "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", "HAVE_UCONTEXT", NULL, "#include <stddef.h>\n" "#include <ucontext.h>\n" "static int worked = 0;\n" "static char stack[1024];\n" "static ucontext_t a, b;\n" "static void fn(void *p, void *q) {\n" " void *cp = &worked;\n" " void *cq = (void *)(~((ptrdiff_t)cp));\n" " if ((p == cp) && (q == cq))\n" " worked = 1;\n" " setcontext(&b);\n" "}\n" "int main(void) {\n" " void *ap = &worked;\n" " void *aq = (void *)(~((ptrdiff_t)ap));\n" " getcontext(&a);\n" " a.uc_stack.ss_sp = stack;\n" " a.uc_stack.ss_size = sizeof(stack);\n" " makecontext(&a, (void (*)(void))fn, 2, ap, aq);\n" " swapcontext(&b, &a);\n" " return worked ? 0 : 1;\n" "}\n" }, { "HAVE_BUILTIN_CPU_SUPPORTS", "__builtin_cpu_supports()", "DEFINES_FUNC", NULL, NULL, "#include <stdbool.h>\n" "static bool func(void) {\n" " return __builtin_cpu_supports(\"mmx\");\n" "}" }, { "HAVE_CLOSEFROM", "closefrom() offered by system", "DEFINES_EVERYTHING", NULL, NULL, "#include <stdlib.h>\n" "#include <unistd.h>\n" "int main(void) {\n" " closefrom(STDERR_FILENO + 1);\n" " return 0;\n" "}\n" }, { "HAVE_F_CLOSEM", "F_CLOSEM defined for fctnl.", "DEFINES_EVERYTHING", NULL, NULL, "#include <fcntl.h>\n" "#include <unistd.h>\n" "int main(void) {\n" " int res = fcntl(STDERR_FILENO + 1, F_CLOSEM, 0);\n" " return res < 0;\n" "}\n" }, { "HAVE_NR_CLOSE_RANGE", "close_range syscall available as __NR_close_range.", "DEFINES_EVERYTHING", NULL, NULL, "#include <limits.h>\n" "#include <sys/syscall.h>\n" "#include <unistd.h>\n" "int main(void) {\n" " int res = syscall(__NR_close_range, STDERR_FILENO + 1, INT_MAX, 0);\n" " return res < 0;\n" "}\n" }, { "HAVE_F_MAXFD", "F_MAXFD defined for fcntl.", "DEFINES_EVERYTHING", NULL, NULL, "#include <fcntl.h>\n" "#include <unistd.h>\n" "int main(void) {\n" " int res = fcntl(0, F_MAXFD);\n" " return res < 0;\n" "}\n" }, */ }; static void c12r_err(int eval, const char 
*fmt, ...) { int err_errno = errno; va_list ap; fprintf(stderr, "%s: ", progname); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, ": %s\n", strerror(err_errno)); exit(eval); } static void c12r_errx(int eval, const char *fmt, ...) { va_list ap; fprintf(stderr, "%s: ", progname); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, "\n"); exit(eval); } static void start_test(const char *what, const char *why) { if (like_a_libtool) { printf("%s%s... ", what, why); fflush(stdout); } } static void end_test(bool result) { if (like_a_libtool) printf("%s\n", result ? "yes" : "no"); } static size_t fcopy(FILE *fsrc, FILE *fdst) { char buffer[BUFSIZ]; size_t rsize, wsize; size_t copied = 0; while ((rsize = fread(buffer, 1, BUFSIZ, fsrc)) > 0) { wsize = fwrite(buffer, 1, rsize, fdst); copied += wsize; if (wsize != rsize) break; } return copied; } static char *grab_stream(FILE *file) { size_t max, ret, size = 0; char *buffer; max = BUFSIZ; buffer = malloc(max); while ((ret = fread(buffer+size, 1, max - size, file)) == max - size) { size += ret; buffer = realloc(buffer, max *= 2); } size += ret; if (ferror(file)) c12r_err(EXIT_TROUBLE_RUNNING, "reading from command"); buffer[size] = '\0'; return buffer; } static char *run(const char *cmd, int *exitstatus) { static const char redir[] = " 2>&1"; size_t cmdlen; char *cmdredir; FILE *cmdout; char *ret; cmdlen = strlen(cmd); cmdredir = malloc(cmdlen + sizeof(redir)); memcpy(cmdredir, cmd, cmdlen); memcpy(cmdredir + cmdlen, redir, sizeof(redir)); cmdout = popen(cmdredir, "r"); if (!cmdout) c12r_err(EXIT_TROUBLE_RUNNING, "popen \"%s\"", cmdredir); free(cmdredir); ret = grab_stream(cmdout); *exitstatus = pclose(cmdout); return ret; } static char *connect_args(const char *argv[], const char *outflag, const char *files) { unsigned int i; char *ret; size_t len = strlen(outflag) + strlen(files) + 1; for (i = 1; argv[i]; i++) len += 1 + strlen(argv[i]); ret = malloc(len); len = 0; for (i = 1; 
argv[i]; i++) { strcpy(ret + len, argv[i]); len += strlen(argv[i]); if (argv[i+1] || *outflag) ret[len++] = ' '; } strcpy(ret + len, outflag); len += strlen(outflag); strcpy(ret + len, files); return ret; } static struct test *find_test(const char *name) { unsigned int i; for (i = 0; tests[i].name; i++) { if (strcmp(tests[i].name, name) == 0) return &tests[i]; } c12r_errx(EXIT_BAD_TEST, "Unknown test %s", name); abort(); } #define PRE_BOILERPLATE "/* Test program generated by configurator. */\n" #define MAIN_START_BOILERPLATE \ "int main(int argc, char *argv[]) {\n" \ " (void)argc;\n" \ " (void)argv;\n" #define USE_FUNC_BOILERPLATE "(void)func;\n" #define MAIN_BODY_BOILERPLATE "return 0;\n" #define MAIN_END_BOILERPLATE "}\n" static bool run_test(const char *cmd, const char *wrapper, struct test *test) { char *output, *newcmd; FILE *outf; int status; if (test->done) return test->answer; if (test->depends) { size_t len; const char *deps = test->depends; char *dep; /* Space-separated dependencies, could be ! for inverse. */ while ((len = strcspn(deps, " ")) != 0) { bool positive = true; if (deps[len]) { dep = strdup(deps); dep[len] = '\0'; } else { dep = (char *)deps; } if (dep[0] == '!') { dep++; positive = false; } if (run_test(cmd, wrapper, find_test(dep)) != positive) { test->answer = false; test->done = true; return test->answer; } if (deps[len]) free(dep); deps += len; deps += strspn(deps, " "); } } outf = fopen(INPUT_FILE, verbose > 1 ? 
"w+" : "w"); if (!outf) c12r_err(EXIT_TROUBLE_RUNNING, "creating %s", INPUT_FILE); fprintf(outf, "%s", PRE_BOILERPLATE); if (strstr(test->style, "INSIDE_MAIN")) { fprintf(outf, "%s", MAIN_START_BOILERPLATE); fprintf(outf, "%s", test->fragment); fprintf(outf, "%s", MAIN_END_BOILERPLATE); } else if (strstr(test->style, "OUTSIDE_MAIN")) { fprintf(outf, "%s", test->fragment); fprintf(outf, "%s", MAIN_START_BOILERPLATE); fprintf(outf, "%s", MAIN_BODY_BOILERPLATE); fprintf(outf, "%s", MAIN_END_BOILERPLATE); } else if (strstr(test->style, "DEFINES_FUNC")) { fprintf(outf, "%s", test->fragment); fprintf(outf, "%s", MAIN_START_BOILERPLATE); fprintf(outf, "%s", USE_FUNC_BOILERPLATE); fprintf(outf, "%s", MAIN_BODY_BOILERPLATE); fprintf(outf, "%s", MAIN_END_BOILERPLATE); } else if (strstr(test->style, "DEFINES_EVERYTHING")) { fprintf(outf, "%s", test->fragment); } else c12r_errx(EXIT_BAD_TEST, "Unknown style for test %s: %s", test->name, test->style); if (verbose > 1) { fseek(outf, 0, SEEK_SET); fcopy(outf, stdout); } fclose(outf); newcmd = strdup(cmd); if (test->flags) { newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ") + strlen(test->flags) + 1); strcat(newcmd, " "); strcat(newcmd, test->flags); if (verbose > 1) printf("Extra flags line: %s", newcmd); } if (test->link) { newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ") + strlen(test->link) + 1); strcat(newcmd, " "); strcat(newcmd, test->link); if (verbose > 1) printf("Extra link line: %s", newcmd); } start_test("checking for ", test->desc); output = run(newcmd, &status); free(newcmd); if (status != 0 || strstr(output, "warning")) { if (verbose) printf("Compile %s for %s, status %i: %s\n", status ? "fail" : "warning", test->name, status, output); if (strstr(test->style, "EXECUTE") && !strstr(test->style, "MAY_NOT_COMPILE")) c12r_errx(EXIT_BAD_TEST, "Test for %s did not compile:\n%s", test->name, output); test->answer = false; free(output); } else { /* Compile succeeded. 
*/ free(output); /* We run INSIDE_MAIN tests for sanity checking. */ if (strstr(test->style, "EXECUTE") || strstr(test->style, "INSIDE_MAIN")) { char *cmd = malloc(strlen(wrapper) + strlen(" ." DIR_SEP OUTPUT_FILE) + 1); strcpy(cmd, wrapper); strcat(cmd, " ." DIR_SEP OUTPUT_FILE); output = run(cmd, &status); free(cmd); if (!strstr(test->style, "EXECUTE") && status != 0) c12r_errx(EXIT_BAD_TEST, "Test for %s failed with %i:\n%s", test->name, status, output); if (verbose && status) printf("%s exited %i\n", test->name, status); free(output); } test->answer = (status == 0); } test->done = true; end_test(test->answer); if (test->answer && test->overrides) { struct test *override = find_test(test->overrides); override->done = true; override->answer = true; } return test->answer; } static char *any_field(char **fieldname) { char buf[1000]; for (;;) { char *p, *eq; if (!fgets(buf, sizeof(buf), stdin)) return NULL; p = buf; /* Ignore whitespace, lines starting with # */ while (*p == ' ' || *p == '\t') p++; if (*p == '#' || *p == '\n') continue; eq = strchr(p, '='); if (!eq) c12r_errx(EXIT_BAD_INPUT, "no = in line: %s", p); *eq = '\0'; *fieldname = strdup(p); p = eq + 1; if (strlen(p) && p[strlen(p)-1] == '\n') p[strlen(p)-1] = '\0'; return strdup(p); } } static char *read_field(const char *name, bool compulsory) { char *fieldname, *value; value = any_field(&fieldname); if (!value) { if (!compulsory) return NULL; c12r_errx(EXIT_BAD_INPUT, "Could not read field %s", name); } if (strcmp(fieldname, name) != 0) c12r_errx(EXIT_BAD_INPUT, "Expected field %s not %s", name, fieldname); return value; } /* Test descriptions from stdin: * Lines starting with # or whitespace-only are ignored. * * First three non-ignored lines must be: * var=<varname> * desc=<description-for-autotools-style> * style=OUTSIDE_MAIN DEFINES_FUNC INSIDE_MAIN DEFINES_EVERYTHING EXECUTE MAY_NOT_COMPILE * * Followed by optional lines: * depends=<space-separated-testnames, ! 
to invert> * link=<extra args for link line> * flags=<extra args for compile line> * overrides=<testname-to-force> * * Finally a code line, either: * code=<oneline> OR * code= * <lines of code> * <end-comment> * * And <end-comment> looks like this next comment: */ /*END*/ static bool read_test(struct test *test) { char *field, *value; char buf[1000]; memset(test, 0, sizeof(*test)); test->name = read_field("var", false); if (!test->name) return false; test->desc = read_field("desc", true); test->style = read_field("style", true); /* Read any optional fields. */ while ((value = any_field(&field)) != NULL) { if (strcmp(field, "depends") == 0) test->depends = value; else if (strcmp(field, "link") == 0) test->link = value; else if (strcmp(field, "flags") == 0) test->flags = value; else if (strcmp(field, "overrides") == 0) test->overrides = value; else if (strcmp(field, "code") == 0) break; else c12r_errx(EXIT_BAD_INPUT, "Unknown field %s in %s", field, test->name); } if (!value) c12r_errx(EXIT_BAD_INPUT, "Missing code in %s", test->name); if (strlen(value) == 0) { /* Multiline program, read to END comment */ while (fgets(buf, sizeof(buf), stdin) != 0) { size_t n; if (strncmp(buf, "/*END*/", 7) == 0) break; n = strlen(value); value = realloc(value, n + strlen(buf) + 1); strcpy(value + n, buf); n += strlen(buf); } } test->fragment = value; return true; } static void read_tests(size_t num_tests) { while (read_test(tests + num_tests)) { num_tests++; tests = realloc(tests, (num_tests + 1) * sizeof(tests[0])); tests[num_tests].name = NULL; } } int main(int argc, const char *argv[]) { char *cmd; unsigned int i; const char *default_args[] = { "", DEFAULT_COMPILER, DEFAULT_FLAGS, NULL }; const char *outflag = DEFAULT_OUTPUT_EXE_FLAG; const char *configurator_cc = NULL; const char *wrapper = ""; const char *orig_cc; const char *varfile = NULL; const char *headerfile = NULL; bool extra_tests = false; FILE *outf; if (argc > 0) progname = argv[0]; while (argc > 1) { if 
(strcmp(argv[1], "--help") == 0) { printf("Usage: configurator [-v] [--var-file=<filename>] [-O<outflag>] [--configurator-cc=<compiler-for-tests>] [--wrapper=<wrapper-for-tests>] [--autotools-style] [--extra-tests] [<compiler> <flags>...]\n" " <compiler> <flags> will have \"<outflag> <outfile> <infile.c>\" appended\n" "Default: %s %s %s\n", DEFAULT_COMPILER, DEFAULT_FLAGS, DEFAULT_OUTPUT_EXE_FLAG); exit(0); } if (strncmp(argv[1], "-O", 2) == 0) { argc--; argv++; outflag = argv[1] + 2; if (!*outflag) { fprintf(stderr, "%s: option requires an argument -- O\n", argv[0]); exit(EXIT_BAD_USAGE); } } else if (strcmp(argv[1], "-v") == 0) { argc--; argv++; verbose++; } else if (strcmp(argv[1], "-vv") == 0) { argc--; argv++; verbose += 2; } else if (strncmp(argv[1], "--configurator-cc=", 18) == 0) { configurator_cc = argv[1] + 18; argc--; argv++; } else if (strncmp(argv[1], "--wrapper=", 10) == 0) { wrapper = argv[1] + 10; argc--; argv++; } else if (strncmp(argv[1], "--var-file=", 11) == 0) { varfile = argv[1] + 11; argc--; argv++; } else if (strcmp(argv[1], "--autotools-style") == 0) { like_a_libtool = true; argc--; argv++; } else if (strncmp(argv[1], "--header-file=", 14) == 0) { headerfile = argv[1] + 14; argc--; argv++; } else if (strcmp(argv[1], "--extra-tests") == 0) { extra_tests = true; argc--; argv++; } else if (strcmp(argv[1], "--") == 0) { break; } else if (argv[1][0] == '-') { c12r_errx(EXIT_BAD_USAGE, "Unknown option %s", argv[1]); } else { break; } } if (argc == 1) argv = default_args; /* Copy with NULL entry at end */ tests = calloc(sizeof(base_tests)/sizeof(base_tests[0]) + 1, sizeof(base_tests[0])); memcpy(tests, base_tests, sizeof(base_tests)); if (extra_tests) read_tests(sizeof(base_tests)/sizeof(base_tests[0])); orig_cc = argv[1]; if (configurator_cc) argv[1] = configurator_cc; cmd = connect_args(argv, outflag, OUTPUT_FILE " " INPUT_FILE); if (like_a_libtool) { start_test("Making autoconf users comfortable", ""); sleep(1); end_test(1); } for (i = 0; 
tests[i].name; i++) run_test(cmd, wrapper, &tests[i]); free(cmd); remove(OUTPUT_FILE); remove(INPUT_FILE); if (varfile) { FILE *vars; if (strcmp(varfile, "-") == 0) vars = stdout; else { start_test("Writing variables to ", varfile); vars = fopen(varfile, "a"); if (!vars) c12r_err(EXIT_TROUBLE_RUNNING, "Could not open %s", varfile); } for (i = 0; tests[i].name; i++) fprintf(vars, "%s=%u\n", tests[i].name, tests[i].answer); if (vars != stdout) { if (fclose(vars) != 0) c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", varfile); end_test(1); } } if (headerfile) { start_test("Writing header to ", headerfile); outf = fopen(headerfile, "w"); if (!outf) c12r_err(EXIT_TROUBLE_RUNNING, "Could not open %s", headerfile); } else outf = stdout; fprintf(outf, "/* Generated by CCAN configurator */\n" "#ifndef CCAN_CONFIG_H\n" "#define CCAN_CONFIG_H\n"); fprintf(outf, "#ifndef _GNU_SOURCE\n"); fprintf(outf, "#define _GNU_SOURCE /* Always use GNU extensions. */\n"); fprintf(outf, "#endif\n"); fprintf(outf, "#define CCAN_COMPILER \"%s\"\n", orig_cc); cmd = connect_args(argv + 1, "", ""); fprintf(outf, "#define CCAN_CFLAGS \"%s\"\n", cmd); free(cmd); fprintf(outf, "#define CCAN_OUTPUT_EXE_CFLAG \"%s\"\n\n", outflag); /* This one implies "#include <ccan/..." works, eg. for tdb2.h */ fprintf(outf, "#define HAVE_CCAN 1\n"); for (i = 0; tests[i].name; i++) fprintf(outf, "#define %s %u\n", tests[i].name, tests[i].answer); fprintf(outf, "#endif /* CCAN_CONFIG_H */\n"); if (headerfile) { if (fclose(outf) != 0) c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", headerfile); end_test(1); } return 0; }
/* ===================== stochqn.c ===================== */
/* Stochastic limited-memory Quasi-Newton optimization Methods for smooth stochastic optimization of both convex and non-convex functions, using search directions computed by an approximated inverse Hessian-vector product, which is obtained through limited-memory BFGS recursive formula. The implementations are based on the following works: * Byrd, R.H., Hansen, S.L., Nocedal, J. and Singer, Y., 2016. "A stochastic quasi-Newton method for large-scale optimization." SIAM Journal on Optimization, 26(2), pp.1008-1031. (SQN) * Schraudolph, N.N., Yu, J. and Günter, S., 2007, March. "A stochastic quasi-Newton method for online convex optimization." In Artificial Intelligence and Statistics (pp. 436-443). (oLBFGS) * Keskar, N.S. and Berahas, A.S., 2016, September. "adaQN: An Adaptive Quasi-Newton Algorithm for Training RNNs." In Joint European Conference on Machine Learning and Knowledge Discovery in Databases (pp. 1-16). Springer, Cham. (adaQN) * Wright, S. and Nocedal, J., 1999. "Numerical optimization." (ch 7) Springer Science, 35(67-68), p.7. (L-BFGS two-loop recursion, and correction pairs based on gradient differences) Written for C99 standard with fixes for compilation with OpenMP 2.0 (e.g. MSVC). BSD 2-Clause License Copyright (c) 2020, David Cortes All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Standard headers */ #include <stdlib.h> #include <string.h> #include <stddef.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifndef _FOR_R #include <stdio.h> #endif /* Library header */ #include "stochqn.h" /* BLAS functions */ #ifdef _FOR_PYTON #include "findblas.h" /* https://www.github.com/david-cortes/findblas */ #elif defined(_FOR_R) #include "blas_R.h" #include <R_ext/Print.h> #define fprintf(f, message) REprintf(message) #else #include "blasfuns.h" #endif /* --------------- Preprocessor definitions --------------- */ /* Aliasing for compiler optimizations */ #ifdef __cplusplus #if defined(__GNUG__) || defined(__GNUC__) || defined(_MSC_VER) || defined(__clang__) || defined(__INTEL_COMPILER) #define restrict __restrict #else #define restrict #endif #elif defined(_MSC_VER) #define restrict __restrict #elif !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) #define restrict #endif /* In-lining for faster calls */ #ifndef __cplusplus #if defined(_MSC_VER) #define inline __inline #elif !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) #define inline #endif #endif /* OpenMP < 3.0 (e.g. 
MSVC as of 2019) does not support parallel for's with unsigned iterators, and does not support declaring the iterator type in the loop itself */ #ifdef _OPENMP #if (_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64) /* OpenMP < 3.0 */ #define size_t_for #else #define size_t_for size_t #endif #else #define size_t_for size_t #endif #ifndef isnan #ifdef _isnan #define isnan _isnan #else #define isnan(x) ( (x) != (x) ) #endif #endif #ifndef isinf #ifdef _finite #define isinf(x) (!_finite(x)) #else #define isinf(x) ( (x) >= HUGE_VAL || (x) <= -HUGE_VAL ) #endif #endif #define x_avg x_sum /* this is to keep track of when the sum array has been divided */ #define min2(a, b) (((a) < (b))? (a) : (b)) /* --------------- End of preprocessor definitions --------------- */ #ifdef __cplusplus extern "C" { #endif /* --------------- General-purpose helpers --------------- */ static inline void copy_arr(const real_t *restrict src, real_t *restrict dest, const int n, const int nthreads) { /* Note: don't use BLAS dcopy as it's actually much slower */ #if defined(_OPENMP) int i; int chunk_size = n / nthreads; int remainder = n % nthreads; /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = min2(nthreads, 2); /* Note: on x86, using more than 2 threads will end up making it slower */ #pragma omp parallel for schedule(static, 1) firstprivate(src, dest, chunk_size, nthreads) num_threads(nthreads_non_const) for (i = 0; i < nthreads; i++){ memcpy(dest + i * chunk_size, src + i * chunk_size, sizeof(real_t) * chunk_size); } if (remainder > 0){ memcpy(dest + nthreads * chunk_size, src + nthreads * chunk_size, sizeof(real_t) * remainder); } #else memcpy(dest, src, sizeof(real_t) * n); #endif } static inline void set_to_zero(real_t arr[], const int n, const int nthreads) { #if defined(_OPENMP) int i; int chunk_size = n / nthreads; int remainder = n % nthreads; /* oracle compilers cannot take 'const int' (CRAN requirement for 
building in solaris OS) */ int nthreads_non_const = min2(nthreads, 2); /* Note: on x86 CPUs, using more than 2 threads will make it slower */ #pragma omp parallel for schedule(static, 1) firstprivate(arr, chunk_size, nthreads) num_threads(nthreads_non_const) for (i = 0; i < nthreads; i++){ memset(arr + i * chunk_size, 0, sizeof(real_t) * chunk_size); } if (remainder > 0){ memset(arr + nthreads * chunk_size, 0, sizeof(real_t) * remainder); } #else memset(arr, 0, sizeof(real_t) * n); #endif } static inline void multiply_elemwise(real_t *restrict inout, const real_t *restrict other, const int n, const int nthreads) { #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */ int i; int n_szt = n; #else size_t n_szt = (size_t) n; #endif /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = nthreads; #pragma omp parallel for if((n > 1e7) && (nthreads > 4)) schedule(static) firstprivate(inout, other, n_szt) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) inout[i] *= other[i]; } static inline void difference_elemwise(real_t *restrict out, const real_t *restrict later, const real_t *restrict earlier, const int n, const int nthreads) { #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */ int i; int n_szt = n; #else size_t n_szt = (size_t) n; #endif /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = nthreads; #pragma omp parallel for if( (n > 1e7) && (nthreads > 4)) schedule(static) firstprivate(n_szt, out, later, earlier) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) out[i] = later[i] - earlier[i]; } static inline int check_inf_nan(const real_t arr[], const int n, const int nthreads) { size_t n_szt = (size_t) n; int is_wrong = 0; #if defined(_OPENMP) & !defined(_WIN32) &!defined(_WIN64) & (_OPENMP > 201305) 
/* OpenMP >= 4.0 */ /* Note1: in most cases the array should not have invalid elements Note2: 'omp cancel' is disabled by default through an environmental variable, and it will ignore modifications of it within the same calling program, so it very likely willnot end up cancelling for most use-cases. */ /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = nthreads; if ( (n > 1e8) && (nthreads > 4) ){ #pragma omp parallel for schedule(static) firstprivate(arr, n_szt) reduction(max: is_wrong) num_threads(nthreads_non_const) for (size_t i = 0; i < n_szt; i++){ if (isinf(arr[i])){ is_wrong = 1; // #pragma omp cancel for } if (isnan(arr[i])){ is_wrong = 1; // #pragma omp cancel for } } } else #endif { for (size_t i = 0; i < n_szt; i++){ if (isinf(arr[i])){return 1;} if (isnan(arr[i])){return 1;} } } if (is_wrong){return 1;} return 0; } static inline void add_to_sum(const real_t *restrict new_values, real_t *restrict sum_arr, const size_t n, const int nthreads) { /* Note: daxpy in MKL is actually slower than this */ #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */ int i; int n_szt = n; #else size_t n_szt = (size_t) n; #endif /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = nthreads; #pragma omp parallel for if((n > 1e7) && (nthreads_non_const > 4)) schedule(static) firstprivate(sum_arr, new_values, n_szt) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) sum_arr[i] += new_values[i]; } static inline void average_from_sum(real_t arr_sum[], const size_t n_summed, const int n) { if (n_summed > 1){ cblas_tscal(n, 1 / (real_t) n_summed, arr_sum, 1); } } /* --------------- End of general-purpose helpers --------------- */ /* Optimizers have a workspace that works pretty much like a C++ class. 
This is a long piece of code dealing with memory management, you'll probably want to skip it. */ /* --------- Beginning of initializers, deallocators, and updaters -------- */ bfgs_mem* initialize_bfgs_mem(const size_t mem_size, const int n, const real_t min_curvature, const real_t y_reg, const size_t upd_freq) { real_t *s_bak; real_t *y_bak; if (min_curvature > 0){ s_bak = (real_t*) malloc(sizeof(real_t) * n); y_bak = (real_t*) malloc(sizeof(real_t) * n); } else { s_bak = NULL; y_bak = NULL; } real_t *s_mem = (real_t*) malloc(sizeof(real_t) * n * mem_size); real_t *y_mem = (real_t*) malloc(sizeof(real_t) * n * mem_size); real_t *buffer_rho = (real_t*) malloc(sizeof(real_t) * mem_size); real_t *buffer_alpha = (real_t*) malloc(sizeof(real_t) * mem_size); bfgs_mem *out = (bfgs_mem*) malloc(sizeof(bfgs_mem)); out->s_mem = s_mem; out->y_mem = y_mem; out->buffer_rho = buffer_rho; out->buffer_alpha = buffer_alpha; out->s_bak = s_bak; out->y_bak = y_bak; out->mem_size = mem_size; out->mem_used = 0; out->mem_st_ix = 0; out->upd_freq = upd_freq; out->y_reg = y_reg; out->min_curvature = min_curvature; return out; } void dealloc_bfgs_mem(bfgs_mem *bfgs_memory) { free(bfgs_memory->s_mem); free(bfgs_memory->y_mem); free(bfgs_memory->buffer_rho); free(bfgs_memory->buffer_alpha); free(bfgs_memory->s_bak); free(bfgs_memory->y_bak); free(bfgs_memory); } fisher_mem* initialize_fisher_mem(const size_t mem_size, const int n) { real_t *F = (real_t*) malloc(sizeof(real_t) * n * mem_size); real_t *buffer_y = (real_t*) malloc(sizeof(real_t) * mem_size); fisher_mem *out = (fisher_mem*) malloc(sizeof(fisher_mem)); out->F = F; out->buffer_y = buffer_y; out->mem_size = mem_size; out->mem_used = 0; out->mem_st_ix = 0; return out; } void dealloc_fisher_mem(fisher_mem *fisher_memory) { free(fisher_memory->F); free(fisher_memory->buffer_y); free(fisher_memory); } static inline int check_bfgsmem_nonnull(bfgs_mem* bfgs_memory) { if ( (bfgs_memory->y_mem == NULL) || (bfgs_memory->s_mem == NULL) || 
(bfgs_memory->buffer_rho == NULL) || (bfgs_memory->buffer_alpha == NULL) || (bfgs_memory->s_bak == NULL && bfgs_memory->min_curvature > 0) || (bfgs_memory->y_bak == NULL && bfgs_memory->min_curvature > 0) ) { fprintf(stderr, "Error: Could not allocate memory for BFGS storage.\n"); return 1; } return 0; } static inline int check_fishermem_nonnull(fisher_mem* fisher_memory) { if (fisher_memory->F == NULL || fisher_memory->buffer_y == NULL){ fprintf(stderr, "Error: Could not allocate memory for Fisher storage.\n"); return 1; } return 0; } static inline int check_oLBFGS_nonnull(workspace_oLBFGS *oLBFGS) { /* Check for memory allocation failure */ if ( (oLBFGS->bfgs_memory == NULL) || (oLBFGS->grad_prev == NULL) || (oLBFGS == NULL) ){ fprintf(stderr, "Error: Could not allocate memory for oLBFGS.\n"); return 1; } return check_bfgsmem_nonnull(oLBFGS->bfgs_memory); } static inline int check_SQN_nonnull(workspace_SQN *SQN) { /* Check for memory allocation failure */ if ( (SQN->bfgs_memory == NULL) || (SQN->x_sum == NULL) || (SQN->x_avg_prev == NULL) || (SQN->grad_prev == NULL && SQN->use_grad_diff) || (SQN == NULL) ){ dealloc_SQN(SQN); fprintf(stderr, "Error: Could not allocate memory for SQN.\n"); return 1; } return check_bfgsmem_nonnull(SQN->bfgs_memory); } static inline int check_adaQN_nonnull(workspace_adaQN *adaQN) { /* Check for memory allocation failure */ if ( (adaQN->bfgs_memory == NULL) || (adaQN->H0 == NULL) || (adaQN->x_sum == NULL) || (adaQN->x_avg_prev == NULL) || (adaQN->grad_sum_sq == NULL) || (adaQN->grad_prev == NULL && adaQN->use_grad_diff) || (adaQN == NULL) ){ dealloc_adaQN(adaQN); fprintf(stderr, "Error: Could not allocate memory for adaQN.\n"); return 1; } if ( check_bfgsmem_nonnull(adaQN->bfgs_memory) ) {return 1;}; if (!adaQN->use_grad_diff){return check_fishermem_nonnull(adaQN->fisher_memory);} return 0; } void dealloc_oLBFGS(workspace_oLBFGS *oLBFGS) { dealloc_bfgs_mem(oLBFGS->bfgs_memory); free(oLBFGS->grad_prev); free(oLBFGS); } void 
dealloc_SQN(workspace_SQN *SQN) { dealloc_bfgs_mem(SQN->bfgs_memory); free(SQN->grad_prev); free(SQN->x_sum); free(SQN->x_avg_prev); free(SQN); } void dealloc_adaQN(workspace_adaQN *adaQN) { dealloc_bfgs_mem(adaQN->bfgs_memory); if (!adaQN->use_grad_diff || adaQN->fisher_memory != NULL){ dealloc_fisher_mem(adaQN->fisher_memory); } free(adaQN->H0); free(adaQN->grad_prev); free(adaQN->x_sum); free(adaQN->x_avg_prev); free(adaQN->grad_sum_sq); free(adaQN); } workspace_oLBFGS* initialize_oLBFGS(const int n, const size_t mem_size, const real_t hess_init, const real_t y_reg, const real_t min_curvature, const int check_nan, const int nthreads) { bfgs_mem *bfgs_memory = initialize_bfgs_mem(mem_size, n, min_curvature, y_reg, 1); real_t *grad_prev = (real_t*) malloc(sizeof(real_t) * n); workspace_oLBFGS *out = (workspace_oLBFGS*) malloc(sizeof(workspace_oLBFGS)); out->bfgs_memory = bfgs_memory; out->grad_prev = grad_prev; out->hess_init = hess_init; out->niter = 0; out->section = 0; out->check_nan = check_nan; out->nthreads = nthreads; out->n = n; if ( check_oLBFGS_nonnull(out) ) {dealloc_oLBFGS(out); return NULL;} return out; } workspace_SQN* initialize_SQN(const int n, const size_t mem_size, const size_t bfgs_upd_freq, const real_t min_curvature, const int use_grad_diff, const real_t y_reg, const int check_nan, const int nthreads) { real_t *grad_prev; if (use_grad_diff){grad_prev = (real_t*) malloc(sizeof(real_t) * n);} else {grad_prev = NULL;} bfgs_mem *bfgs_memory = initialize_bfgs_mem(mem_size, n, min_curvature, y_reg, bfgs_upd_freq); real_t *x_sum = (real_t*) calloc(n, sizeof(real_t)); real_t *x_avg_prev = (real_t*) malloc(sizeof(real_t) * n); workspace_SQN* out = (workspace_SQN*) malloc(sizeof(workspace_SQN)); out->bfgs_memory = bfgs_memory; out->grad_prev = grad_prev; out->x_sum = x_sum; out->x_avg_prev = x_avg_prev; out->use_grad_diff = use_grad_diff; out->niter = 0; out->section = 0; out->check_nan = check_nan; out->nthreads = nthreads; out->n = n; if ( 
check_SQN_nonnull(out) ) {dealloc_SQN(out); return NULL;} return out; } workspace_adaQN* initialize_adaQN(const int n, const size_t mem_size, const size_t fisher_size, const size_t bfgs_upd_freq, const real_t max_incr, const real_t min_curvature, const real_t scal_reg, const real_t rmsprop_weight, const int use_grad_diff, const real_t y_reg, const int check_nan, const int nthreads) { bfgs_mem *bfgs_memory = initialize_bfgs_mem(mem_size, n, min_curvature, y_reg, bfgs_upd_freq); fisher_mem *fisher_memory; real_t *grad_prev; if (use_grad_diff){ fisher_memory = NULL; grad_prev = (real_t*) malloc(sizeof(real_t) * n); } else { fisher_memory = initialize_fisher_mem(fisher_size, n); grad_prev = NULL; } real_t *H0 = (real_t*) malloc(sizeof(real_t) * n); real_t *x_sum = (real_t*) calloc(n, sizeof(real_t)); real_t *x_avg_prev = (real_t*) malloc(sizeof(real_t) * n); real_t *grad_sum_sq = (real_t*) calloc(n, sizeof(real_t)); workspace_adaQN *out = (workspace_adaQN*) malloc(sizeof(workspace_adaQN)); out->bfgs_memory = bfgs_memory; out->fisher_memory = fisher_memory; out->H0 = H0; out->grad_prev = grad_prev; out->x_sum = x_sum; out->x_avg_prev = x_avg_prev; out->grad_sum_sq = grad_sum_sq; out->max_incr = max_incr; out->scal_reg = scal_reg; out->rmsprop_weight = rmsprop_weight; out->use_grad_diff = use_grad_diff; out->f_prev = 0; out->niter = 0; out->section = 0; out->check_nan = check_nan; out->nthreads = nthreads; out->n = n; if ( check_adaQN_nonnull(out) ){dealloc_adaQN(out); return NULL;} return out; } /* Functions for adding and discarding correction pairs and previous gradients. When deleted, the data is not overwritten or freed, but the indexes are reset to act as if they were not present. 
*/ static inline void flush_bfgs_mem(bfgs_mem *bfgs_memory) { bfgs_memory->mem_used = 0; bfgs_memory->mem_st_ix = 0; } static inline void flush_fisher_mem(fisher_mem *fisher_memory) { if (fisher_memory != NULL) { fisher_memory->mem_used = 0; fisher_memory->mem_st_ix = 0; } } static inline void incr_bfgs_counters(bfgs_mem *bfgs_memory) { bfgs_memory->mem_st_ix = (bfgs_memory->mem_st_ix + 1) % bfgs_memory->mem_size; bfgs_memory->mem_used = ((bfgs_memory->mem_used + 1) >= bfgs_memory->mem_size)? bfgs_memory->mem_size : (bfgs_memory->mem_used + 1); } static inline void incr_fisher_counters(fisher_mem *fisher_memory) { fisher_memory->mem_st_ix = (fisher_memory->mem_st_ix + 1) % fisher_memory->mem_size; fisher_memory->mem_used = ((fisher_memory->mem_used + 1) >= fisher_memory->mem_size)? fisher_memory->mem_size : (fisher_memory->mem_used + 1); } static inline void add_to_fisher_mem(real_t grad[], fisher_mem *fisher_memory, const int n, const int nthreads) { if (fisher_memory != NULL){ copy_arr(grad, fisher_memory->F + fisher_memory->mem_st_ix * n, n, nthreads); incr_fisher_counters(fisher_memory); } } static inline void backup_corr_pair(bfgs_mem *bfgs_memory, const int n, const int nthreads) { if (bfgs_memory->min_curvature > 0){ copy_arr(bfgs_memory->s_bak, bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n, n, nthreads); copy_arr(bfgs_memory->y_bak, bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n, n, nthreads); } } static inline void rollback_corr_pair(bfgs_mem *bfgs_memory, const int n, info_enum *iter_info, const int nthreads) { if (bfgs_memory->min_curvature > 0){ copy_arr(bfgs_memory->s_bak, bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n, n, nthreads); copy_arr(bfgs_memory->y_bak, bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n, n, nthreads); *iter_info = curvature_too_small; } } static inline void archive_x_avg(real_t x_avg[], real_t x_avg_prev[], const int n, const int nthreads) { copy_arr(x_avg, x_avg_prev, n, nthreads); set_to_zero(x_sum, n, nthreads); /* x_avg 
is aliased to x_sum */
}

/* --------- End of initializers, deallocators, and updaters -------- */

/* ============= Optimization algorithms section =============
   Note: the functions here oftentimes have an input variable 'nthreads',
   but most of the work is done through BLAS functions, and the number of
   threads for them is set beforehand in the optimizer functions. */

/* Approximate H^(-1) * g through the "L-BFGS two-loop recursion"
   For the variable names, refer to:
   Wright, S. and Nocedal, J., 1999. "Numerical optimization." (ch. 7)

   grad (in, out)  : real_t[n]
       Gradient for the current values of the variables - the computed search
       direction will be written to this same array, overwriting the gradient.
   n               : int
       Number of variables (dimensionality of 'x')
   H0              : real_t[n] or NULL
       Initial matrix H0 (diagonal only) from which H^-1 is updated.
       If passing NULL here and zero to 'h0', will use a scalar value as
       suggested in the book "Numerical optimization." (Wright & Nocedal)
   h0              : real_t
       number to which to initialize the diagonal H0. If passing zero here and
       NULL to 'H0', will use a scalar value as suggested in the book
       "Numerical optimization." (Wright & Nocedal)
   y_mem           : real_t[mem_size, n]
       'y' correction variables. These shall be ordered from earliest to
       latest, with the earliest vector not necessarily at the first position.
   s_mem           : real_t[mem_size, n]
       's' correction variables. These shall be ordered from earliest to
       latest, with the earliest vector not necessarily at the first position.
   mem_size        : size_t
       Dimensionality of the arrays 'y_mem' and 's_mem' (how many rows it can
       have).
   mem_used        : size_t
       Number of filled rows in 'y_mem' and 's_mem'
   mem_st_ix       : size_t
       Position in 'y_mem' and 's_mem' at which the earliest vector is stored,
       with later elements following onwards, continuing at the beginning
       after position 'mem_used' if this is not zero.
   buffer_rho      : real_t[mem_size]
       Temporary array in which to store the computed rho values.
buffer_alpha : real_t[mem_size] Temporary array in which to store the computed alpha values. nthreads : int Number of parallel threads to use - most of the work is done on a BLAS library (and the threads for it are set elsewhere), but for very large problems, passes over the grad/out array can also be parallelized. */ static inline void approx_inv_hess_grad(real_t grad[], int n, real_t H0[], real_t h0, real_t y_mem[], real_t s_mem[], size_t mem_size, size_t mem_used, size_t mem_st_ix, real_t buffer_rho[], real_t buffer_alpha[], int nthreads) { real_t scaling, beta; size_t i, ipos, last_pos; /* backward pass: alpha <- rho * s' q; q <- q - alpha * y */ for (size_t ii = 0; ii < mem_used; ii++) { i = mem_used - ii - 1; ipos = (mem_st_ix + i) % mem_size; buffer_rho[i] = 1 / cblas_tdot(n, y_mem + ipos*n, 1, s_mem + ipos*n, 1); buffer_alpha[i] = buffer_rho[i] * cblas_tdot(n, grad, 1, s_mem + ipos*n, 1); cblas_taxpy(n, -buffer_alpha[i], y_mem + ipos*n, 1, grad, 1); } /* Use a diagonal matrix as a starting point: By default, will calculate it from the last correction pair */ if ( (H0 == NULL) && (h0 <= 0) ) { last_pos = (mem_st_ix - 1 + mem_used) % mem_size; scaling = cblas_tdot(n, s_mem + last_pos*n, 1, y_mem + last_pos*n, 1) / cblas_tdot(n, y_mem + last_pos*n, 1, y_mem + last_pos*n, 1); cblas_tscal(n, scaling, grad, 1); } /* But can also initialize it from values supplied by the user */ else { /* Use diagonal passed by user */ if (H0 != NULL) { multiply_elemwise(grad, H0, n, nthreads); } /* Use scalar passed by user */ else { cblas_tscal(n, h0, grad, 1); } } /* forward pass: beta <- rho * y' * r; r <- r * s * (alpha - beta) */ for (size_t i = 0; i < mem_used; i++) { ipos = (mem_st_ix + i) % mem_size; beta = buffer_rho[i] * cblas_tdot(n, y_mem + ipos*n, 1, grad, 1); cblas_taxpy(n, buffer_alpha[i] - beta, s_mem + ipos*n, 1, grad, 1); } } /* Update the data on previous squared gradients Can use either AdaGrad (simple sum) or RMSProp (squared sum) grad : new gradient to add 
grad_sum_sq (in, out) : array where to store sum of squared past gradients rmsprop_weight : weight in interval(0,1) to give to old info (if 0, will use AdaGrad) n : number of variables (dimensionality of 'x') nthreads : number of parallel threads to use */ static inline void update_sum_sq(real_t *restrict grad, real_t *restrict grad_sum_sq, real_t rmsprop_weight, int n, int nthreads) { #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) int n_szt = n; int i; #else size_t n_szt = (size_t) n; #endif real_t weight_new; /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = nthreads; /* RMSProp update */ if (rmsprop_weight > 0 && rmsprop_weight < 1) { weight_new = 1 - rmsprop_weight; #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const > 4)) schedule(static) firstprivate(n_szt, grad, grad_sum_sq, rmsprop_weight, weight_new) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) grad_sum_sq[i] = rmsprop_weight*grad_sum_sq[i] + weight_new*(grad[i] * grad[i]); } /* AdaGrad update */ else { #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const > 4)) schedule(static) firstprivate(n_szt, grad, grad_sum_sq) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) grad_sum_sq[i] += grad[i] * grad[i]; } } /* Compute a search direction (used as H0 initializer by adaQN) as rescaled gradient using a diagonal matrix, given by the sums of squares of past gradients (AdaGrad or RMSProp formulae). direction (out) : array where to save the computed direction. 
(if NULL, will save the direction in the same 'grad' array) grad (in, out) : current gradient grad_sum_sq (in, out) : sum of squares of past gradients (weighted sum for RMSProp) n : number of variables (dimensionality of 'x') scal_reg : regularization (epsilon) for the scaling rmsprop_weight : weight for old gradients if using RMSProp (pass 0 for AdaGrad init) num_threads : number of parallel threads to use */ static inline void diag_rescal(real_t *restrict direction, real_t *restrict grad, real_t *restrict grad_sum_sq, int n, real_t scal_reg, real_t rmsprop_weight, int nthreads) { update_sum_sq(grad, grad_sum_sq, rmsprop_weight, n, nthreads); #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) int i; int n_szt = n; #else size_t n_szt = (size_t) n; #endif /* oracle compilers cannot take 'const int' (CRAN requirement for building in solaris OS) */ int nthreads_non_const = nthreads; if (direction == NULL) { #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const >= 4) ) schedule(static) firstprivate(direction, grad_sum_sq, scal_reg, n_szt) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) grad[i] /= sqrt(grad_sum_sq[i] + scal_reg); } else { #pragma omp parallel for if( (n > 1e7) && (nthreads_non_const >= 4) ) schedule(static) firstprivate(direction, grad_sum_sq, scal_reg, n_szt) num_threads(nthreads_non_const) for (size_t_for i = 0; i < n_szt; i++) direction[i] = grad[i] / sqrt(grad_sum_sq[i] + scal_reg); } } /* Take a step in the search direction specified by the respective algorithm step_size : size of the step to take n : number of variables (dimensionality of 'x') x (in, out) : current values of the variables grad (in, out) : gradient at current values of x - the search direction will be written there, overwriting the gradient bfgs_memory : BFGS memory struct rmsprop_weight (adaQN) : weight for old gradients if using RMSProp (pass 0 for SQN, oLBFGS, and adaQN with AdaGrad init) H0 (adaQN) : temporary array 
   where to store diagonal initializer for inv. Hessian
   grad_sum_sq (adaQN)(in,out) : sums of squares of past gradients (weighted sums in RMSProp)
   scal_reg (adaQN) : regularization for the diagonal rescaling using grad_sum_sq
   check_nan : whether to check the search direction for NaN or Inf (will reject it if so)
   iter_info : pointer to the indicator on encountered problems
   nthreads : number of parallel threads to use
*/
static inline void take_step(real_t step_size, int n, real_t x[], real_t grad[],
                             bfgs_mem *bfgs_memory, real_t rmsprop_weight,
                             real_t H0[], real_t h0, real_t grad_sum_sq[], real_t scal_reg,
                             int check_nan, info_enum *iter_info, int nthreads)
{
    /* When there are no correction pairs, take a gradient or rescaled gradient step */
    if (bfgs_memory->mem_used == 0)
    {
        /* If no rescaling, take a simple gradient step,
           otherwise, take AdaGrad or RMSProp step */
        if (grad_sum_sq != NULL) {diag_rescal(NULL, grad, grad_sum_sq, n, scal_reg, rmsprop_weight, nthreads);}
    }

    /* When there are correction pairs, get an approx. invHess-grad direction (with diagonal init) */
    else
    {
        if (grad_sum_sq != NULL) {
            diag_rescal(H0, grad, grad_sum_sq, n, scal_reg, rmsprop_weight, nthreads);
        }
        /* the ternary maps the circular-buffer start index to zero when the
           memory has not wrapped around yet */
        approx_inv_hess_grad(grad, n, H0, h0,
                             bfgs_memory->y_mem, bfgs_memory->s_mem,
                             bfgs_memory->mem_size, bfgs_memory->mem_used,
                             (bfgs_memory->mem_st_ix == bfgs_memory->mem_used)?
                                0 : bfgs_memory->mem_st_ix,
                             bfgs_memory->buffer_rho, bfgs_memory->buffer_alpha,
                             nthreads);
    }

    /* Check if the search direction is invalid */
    if (check_nan)
    {
        if (
            check_inf_nan(grad, n, nthreads)
                ||
            /* There are also cases in which the search direction is not NaN,
               but is too large nevertheless */
            cblas_tnrm2(n, grad, 1) > 1e3 * n
            )
        {
            flush_bfgs_mem(bfgs_memory);
            *iter_info = search_direction_was_nan;
            return;
        }
    }

    /* Finally, take step in computed direction */
    cblas_taxpy(n, -step_size, grad, 1, x, 1);
}

/* Update 's' correction vector
   If there's a curvature threshold, will also create a backup of the correction
   pair currently sitting in the memory slot into which the new pair will be written.
   Note that this procedure will not copy the new average into the previous average
   array, which needs to be done after updating 'y' in the main optimization function.
   x_sum : sum of 'x' (optimization variables) since the last BFGS update
       (will be overwritten during this procedure) (pass 'x' for oLBFGS)
   x_avg_prev : average values of 'x' during the interval of the previous BFGS
       update (pass 'x_prev' for oLBFGS)
   n : number of variables (dimensionality of 'x')
   needs_div : whether x_sum should be divided to obtain the average
       (pass 0 if it's already an average)
   bfgs_memory (in, out) : BFGS memory struct
   nthreads : number of parallel threads to use
*/
static inline void update_s_vector(real_t x_sum[], real_t x_avg_prev[], int n, int needs_div,
                                   bfgs_mem *bfgs_memory, int nthreads)
{
    /* oLBFGS:  s = x - x_prev ----not computed here
       others:  s = x_avg - x_avg_prev */
    backup_corr_pair(bfgs_memory, n, nthreads);
    if (needs_div) {
        average_from_sum(x_sum, bfgs_memory->upd_freq, n);
    }
    /* x_sum has now become x_avg --- this is aliased by the preprocessor,
       so don't worry about it not being declared */
    difference_elemwise(bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n,
                        x_avg, x_avg_prev, n, nthreads);
}

/* Check curvature
   See if the new correction pair meets a minimum curvature threshold.
If it does, accept it (store it), and if not, restore back the old correction pair, which was backed-up during the 'update_s_vector' procedure. bfgs_memory (in, out) : BFGS memory struct n : number of variables (dimensionality of 'x') iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void check_min_curvature(bfgs_mem *bfgs_memory, int n, info_enum *iter_info, int nthreads) { /* s^T * y / s^T * s > epsilon */ real_t *s = bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n;; real_t *y = bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n; real_t curv; if (bfgs_memory->min_curvature > 0) { curv = cblas_tdot(n, s, 1, y, 1) / cblas_tdot(n, s, 1, s, 1); if (curv <= bfgs_memory->min_curvature) { rollback_corr_pair(bfgs_memory, n, iter_info, nthreads); return; } } incr_bfgs_counters(bfgs_memory); } /* Update 'y' correction vector using gradient differences Note: 'x_sum' needs to be reset after this (SQN and adaQN) grad : gradient (at new 'x' on the same batch for oLBFGS, at 'x_avg' on a larger batch for others) grad_prev : previous gradient (at previous 'x' for oLBFGS, at 'x_avg_prev' on the previous large batch for others) bfgs_memory (in, out) : BFGS memory struct n : number of variables (dimensionality of 'x') y_reg : regularization parameter (will add this times 's' to 'y') (pass 0 for SQN and adaQN) iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void update_y_grad_diff(real_t grad[], real_t grad_prev[], bfgs_mem *bfgs_memory, int n, info_enum *iter_info, int nthreads) { /* oLBFGS: y = grad_batch(x) - grad_batch(x_prev) + lambda * s others: y = grad(x_avg) - grad_prev(x_avg_prev) */ real_t *s = bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n;; real_t *y = bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n; difference_elemwise(y, grad, grad_prev, n, nthreads); if (bfgs_memory->y_reg > 0){ cblas_taxpy(n, bfgs_memory->y_reg, s, 1, y, 
1); } check_min_curvature(bfgs_memory, n, iter_info, nthreads); } /* Update 'y' correction vector using empirical Fisher matrix (adaQN) fisher_memory : empirical Fisher struct bfgs_memory (in, out) : BFGS memory struct n : number of variables (dimensionality of 'x') iter_info : pointer to the indicator on encountered problems nthreads : number of parallel threads to use */ static inline void update_y_fisher(fisher_mem *fisher_memory, bfgs_mem *bfgs_memory, int n, info_enum *iter_info, int nthreads) { /* y = F' (F * s) / |F| */ real_t *s = bfgs_memory->s_mem + bfgs_memory->mem_st_ix * n; real_t *y = bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n; CBLAS_ORDER c_ord = CblasRowMajor; CBLAS_TRANSPOSE trans_no = CblasNoTrans; CBLAS_TRANSPOSE trans_yes = CblasTrans; cblas_tgemv(c_ord, trans_no, fisher_memory->mem_used, n, 1, fisher_memory->F, n, s, 1, 0, fisher_memory->buffer_y, 1); cblas_tgemv(c_ord, trans_yes, fisher_memory->mem_used, n, 1 / (real_t) fisher_memory->mem_used, fisher_memory->F, n, fisher_memory->buffer_y, 1, 0, y, 1); check_min_curvature(bfgs_memory, n, iter_info, nthreads); } /* Update 'y' correction vector using the production between the Hessian and the 's' vector hess_vec : calculated Hessian * s bfgs_memory (in, out) : BFGS memory struct iter_info : pointer to the indicator on encountered problems n : number of variables (dimensionality of 'x') nthreads : number of parallel threads to use */ static inline void update_y_hessvec(real_t hess_vec[], bfgs_mem *bfgs_memory, info_enum *iter_info, int n, int nthreads) { copy_arr(hess_vec, bfgs_memory->y_mem + bfgs_memory->mem_st_ix * n, n, nthreads); check_min_curvature(bfgs_memory, n, iter_info, nthreads); } /* ============= Optimizer functions for the external API ============= Documentation for them can be found in the header file. 
   These functions are very hard to follow, but think of them like this:
   each of them will send you to a different part as if it were a 'goto',
   only there will be an interruption in between where the required calculation
   is requested externally. Check which part sent you to where you currently
   are, and where is each part going to send you next. */

/* oLBFGS driver: a resumable state machine ('section' records where to resume).
   Returns 0/1 (whether 'x' changed) or -1000 on an invalid workspace;
   '*task' and '*req' tell the caller what to compute next and on which array. */
int run_oLBFGS(real_t step_size, real_t x[], real_t grad[], real_t **req,
               task_enum *task, workspace_oLBFGS *oLBFGS, info_enum *iter_info)
{
    *iter_info = no_problems_encountered;

    /* first run: immediately request a gradient */
    if (oLBFGS->section == 0)
    {
        *task = calc_grad;
        *req = x;
        oLBFGS->section = 1;
        return 0;
    }

    /* second run (main loop): save grad, take a step, save delta_x,
       request another gradient in same batch */
    if (oLBFGS->section == 1)
    {
        /* save gradient */
        copy_arr(grad, oLBFGS->grad_prev, oLBFGS->n, oLBFGS->nthreads);

        /* take a step */
        take_step(step_size, oLBFGS->n, x, grad,
                  oLBFGS->bfgs_memory, 0, NULL, oLBFGS->hess_init, NULL, 0,
                  oLBFGS->check_nan, iter_info, oLBFGS->nthreads);
        oLBFGS->niter++;

        /* store differences in BFGS memory */
        if (*iter_info == no_problems_encountered){
            backup_corr_pair(oLBFGS->bfgs_memory, oLBFGS->n, oLBFGS->nthreads);
            /* rollback happens on 'update_y_grad_diff' */
            /* s = -step_size * direction (the step just taken) */
            cblas_tscal(oLBFGS->n, -step_size, grad, 1);
            copy_arr(grad,
                     oLBFGS->bfgs_memory->s_mem + oLBFGS->bfgs_memory->mem_st_ix * oLBFGS->n,
                     oLBFGS->n, oLBFGS->nthreads);
            /* request another gradient */
            *task = calc_grad_same_batch;
            *req = x;
            oLBFGS->section = 2;
            return 1;
        }

        else {
            if (*iter_info == search_direction_was_nan) {
                flush_bfgs_mem(oLBFGS->bfgs_memory);
            }
            *task = calc_grad;
            *req = x;
            oLBFGS->section = 1;
            return 0;
        }
    }

    /* third run (loop): update correction pairs, request a gradient on new batch */
    if (oLBFGS->section == 2)
    {
        update_y_grad_diff(grad, oLBFGS->grad_prev, oLBFGS->bfgs_memory,
                           oLBFGS->n, iter_info, oLBFGS->nthreads);
        *task = calc_grad;
        *req = x;
        oLBFGS->section = 1;
        return 0;
    }

    /* unreachable unless the workspace was corrupted or not initialized */
    *task = invalid_input;
    fprintf(stderr, "oLBFGS got an invalid workspace as input.\n");
    return -1000;
}

/* SQN driver: same resumable state-machine structure as 'run_oLBFGS' */
int run_SQN(real_t step_size, real_t x[], real_t grad[], real_t hess_vec[],
            real_t **req, real_t **req_vec, task_enum *task,
            workspace_SQN *SQN, info_enum *iter_info)
{
    *iter_info = no_problems_encountered;
    int return_value = 0;

    /* first run: immediately request a gradient */
    if (SQN->section == 0)
    {
        // add_to_sum(x, SQN->x_sum, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* second run (main loop): take a step, save sum,
       see if it's time for creating correction pair */
    if (SQN->section == 1)
    {
        /* take a step */
        take_step(step_size, SQN->n, x, grad,
                  SQN->bfgs_memory, 0, NULL, 0, NULL, 0,
                  SQN->check_nan, iter_info, SQN->nthreads);
        SQN->niter++;

        /* check for unchanged parameters */
        if (*iter_info == search_direction_was_nan) {return_value = 0;}
        else {return_value = 1;}

        /* save sum of new values
           note: even if they are not updated, need to maintain the sum in the
           same magnitude, as it will be divided by L */
        add_to_sum(x, SQN->x_sum, SQN->n, SQN->nthreads);

        /* usually, requests a new gradient and returns right here */
        if ( (SQN->niter % SQN->bfgs_memory->upd_freq) != 0 ) { goto resume_main_loop; }

        /* at some intervals, update hessian approx */

        /* exception: the first time, just store the averages - if using grad diff,
           request a long gradient on those, else go back */
        if (SQN->niter == SQN->bfgs_memory->upd_freq)
        {
            average_from_sum(SQN->x_sum, SQN->bfgs_memory->upd_freq, SQN->n);
            archive_x_avg(SQN->x_avg, SQN->x_avg_prev, SQN->n, SQN->nthreads);
            /* note: x_avg is aliased by the preprocessor as synonym to x_sum */
            if (SQN->use_grad_diff)
            {
                *task = calc_grad_big_batch;
                *req = SQN->x_avg_prev;
                SQN->section = 2;
                return return_value;
            }
            else { goto resume_main_loop; }
        }

        /* first update 's' (turns the sum to avg), but don't reset the sum yet
           as it'll be needed for a hessian-vec or long grad */
        update_s_vector(SQN->x_sum, SQN->x_avg_prev, SQN->n, 1,
                        SQN->bfgs_memory, SQN->nthreads);

        /* request long grad on the new average */
        if (SQN->use_grad_diff)
        {
            *task = calc_grad_big_batch;
            SQN->section = 3;
            *req = SQN->x_avg;
        }
        /* request hessian-vector on the differences between the averages */
        else
        {
            *task = calc_hess_vec;
            SQN->section = 4;
            *req = SQN->x_avg;
            *req_vec = SQN->bfgs_memory->s_mem + SQN->n * SQN->bfgs_memory->mem_st_ix;
        }
        return return_value;
    }

    /* third run: got a long gradient on first averages, store it and go back */
    if (SQN->section == 2)
    {
        copy_arr(grad, SQN->grad_prev, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* fourth run (loop): got a long gradient on new averages, reset sum,
       create correction pair and go back */
    if (SQN->section == 3)
    {
        update_y_grad_diff(grad, SQN->grad_prev, SQN->bfgs_memory,
                           SQN->n, iter_info, SQN->nthreads);
        if (*iter_info == no_problems_encountered){
            copy_arr(grad, SQN->grad_prev, SQN->n, SQN->nthreads);
            copy_arr(SQN->x_avg, SQN->x_avg_prev, SQN->n, SQN->nthreads);
        }
        set_to_zero(SQN->x_sum, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    /* fifth run (loop): got a hessian-vector product, reset sum,
       create a correction pair and go back */
    if (SQN->section == 4)
    {
        archive_x_avg(SQN->x_avg, SQN->x_avg_prev, SQN->n, SQN->nthreads);
        update_y_hessvec(hess_vec, SQN->bfgs_memory, iter_info, SQN->n, SQN->nthreads);
        goto resume_main_loop;
    }

    *task = invalid_input;
    fprintf(stderr, "SQN got an invalid workspace as input.\n");
    return -1000;

    resume_main_loop:
        SQN->section = 1;
        *task = calc_grad;
        *req = x;
        return return_value;
}

/* adaQN driver: same resumable state-machine structure; additionally tracks a
   function value ('f') to reject updates that increase it too much (max_incr) */
int run_adaQN(real_t step_size, real_t x[], real_t f, real_t grad[], real_t **req,
              task_enum *task, workspace_adaQN *adaQN, info_enum *iter_info)
{
    *iter_info = no_problems_encountered;
    int return_value = 0;

    /* first run: immediately request a gradient */
    if (adaQN->section == 0)
    {
        // add_to_sum(x, adaQN->x_sum, adaQN->n, adaQN->nthreads);
        goto resume_main_loop;
    }

    /* second run (main loop): store gradient, take a step (gradient_sq is
       summed there), sum x, see if it's time for creating correction pair
       --if so, request either long grad or function */
    if (adaQN->section == 1)
    {
        /* store gradient */
        add_to_fisher_mem(grad, adaQN->fisher_memory, adaQN->n, adaQN->nthreads);

        /* take a step */
        take_step(step_size, adaQN->n, x, grad,
                  adaQN->bfgs_memory, adaQN->rmsprop_weight, adaQN->H0, 0,
                  adaQN->grad_sum_sq, adaQN->scal_reg,
                  adaQN->check_nan, iter_info, adaQN->nthreads);
        if (*iter_info == search_direction_was_nan)
        {
            // flush_fisher_mem(adaQN->fisher_memory);
            return_value = 0;
        }
        else { return_value = 1; }
        adaQN->niter++;

        /* save sum of new values
           note: even if they are not updated, need to maintain the sum in the
           same magnitude, as it will be divided by L */
        add_to_sum(x, adaQN->x_sum, adaQN->n, adaQN->nthreads);

        /* usually, requests a new gradient and returns right here */
        if ( (adaQN->niter % adaQN->bfgs_memory->upd_freq) != 0 ) { goto resume_main_loop; }

        /* at some intervals, update hessian approx */

        /* exception: the first time, just store the averages, then:
           -if use_grad_diff, request a long gradient on the averages (function comes later)
           -if using max_incr, request a function on the averages
           -if neither, go back to main loop */
        if (adaQN->niter == adaQN->bfgs_memory->upd_freq)
        {
            average_from_sum(adaQN->x_sum, adaQN->bfgs_memory->upd_freq, adaQN->n);
            archive_x_avg(adaQN->x_avg, adaQN->x_avg_prev, adaQN->n, adaQN->nthreads);
            /* note: x_avg is aliased by the preprocessor as synonym to x_sum */
            if (adaQN->use_grad_diff){
                *task = calc_grad_big_batch;
                *req = adaQN->x_avg_prev;
                adaQN->section = 2;
                return return_value;
            }
            if (adaQN->max_incr > 0){
                *task = calc_fun_val_batch;
                *req = adaQN->x_avg_prev;
                adaQN->section = 3;
                return return_value;
            }
            goto resume_main_loop;
        }

        /* evaluate function on new averages if needed */
        if (adaQN->max_incr > 0)
        {
            average_from_sum(adaQN->x_sum, adaQN->bfgs_memory->upd_freq, adaQN->n);
            *task = calc_fun_val_batch;
            *req = adaQN->x_avg;
            adaQN->section = 5;
            return return_value;
        }

        /* first update 's' (turns the sum to avg), but don't reset the sum yet
           as it'll be needed for a hessian-vec or long grad */
        update_s_vector(adaQN->x_sum, adaQN->x_avg_prev, adaQN->n, 1,
                        adaQN->bfgs_memory, adaQN->nthreads);
        goto update_y;
    }

    /* third run: got a long gradient on first averages, store it and go back */
    if (adaQN->section == 2)
    {
        copy_arr(grad, adaQN->grad_prev, adaQN->n, adaQN->nthreads);
        /* ask for function if needed */
        if (adaQN->max_incr){
            *task = calc_fun_val_batch;
            *req = adaQN->x_avg_prev;
            adaQN->section = 3;
            return 0;
        }
        else { goto resume_main_loop; }
    }

    /* fourth run: got first function eval on validation batch,
       store it and request a gradient */
    if (adaQN->section == 3)
    {
        adaQN->f_prev = f;
        goto resume_main_loop;
    }

    /* fifth run (loop): got a long gradient on new averages,
       create correction pair (function was asked before) */
    if (adaQN->section == 4){
        update_y_grad_diff(grad, adaQN->grad_prev, adaQN->bfgs_memory,
                           adaQN->n, iter_info, adaQN->nthreads);
        if (*iter_info == no_problems_encountered)
        {
            copy_arr(grad, adaQN->grad_prev, adaQN->n, adaQN->nthreads);
        }
        set_to_zero(adaQN->x_sum, adaQN->n, adaQN->nthreads);
        goto resume_main_loop;
    }

    /* sixth run (loop): evaluated function on new averages,
       now see whether to keep correction pair */
    if (adaQN->section == 5)
    {
        /* reject the interval entirely when the function increased too much
           or became non-finite: flush memories and restore the previous average */
        if (f > adaQN->max_incr * adaQN->f_prev || isinf(f) || isnan(f) )
        {
            flush_bfgs_mem(adaQN->bfgs_memory);
            flush_fisher_mem(adaQN->fisher_memory);
            copy_arr(adaQN->x_avg_prev, x, adaQN->n, adaQN->nthreads);
            *iter_info = func_increased;
            return_value = 1;
            goto resume_main_loop;
        }
        else
        {
            adaQN->f_prev = f;
            update_s_vector(adaQN->x_avg, adaQN->x_avg_prev, adaQN->n, 0,
                            adaQN->bfgs_memory, adaQN->nthreads);
            goto update_y;
        }
    }

    *task = invalid_input;
    fprintf(stderr, "adaQN got an invalid workspace as input.\n");
    return -1000;

    update_y:
        if (adaQN->use_grad_diff)
        {
            *req = adaQN->x_avg;
            *task = calc_grad_big_batch;
            adaQN->section = 4;
            return return_value;
        }
        else
        {
            update_y_fisher(adaQN->fisher_memory, adaQN->bfgs_memory,
                            adaQN->n, iter_info, adaQN->nthreads);
            if (*iter_info == no_problems_encountered)
            {
                copy_arr(adaQN->x_avg, adaQN->x_avg_prev, adaQN->n, adaQN->nthreads);
            }
            set_to_zero(adaQN->x_sum, adaQN->n, adaQN->nthreads);
            goto resume_main_loop;
        }

    resume_main_loop:
        adaQN->section = 1;
        *task = calc_grad;
        *req = x;
        return return_value;
}

#ifdef __cplusplus
}
#endif
GB_unaryop__minv_bool_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_int8 // op(A') function: GB_tran__minv_bool_int8 // C type: bool // A type: int8_t // cast: ; // unaryop: cij = true #define GB_ATYPE \ int8_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_int8 ( bool *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
text_parser.h
/*! * Copyright (c) 2015 by Contributors * \file text_parser.h * \brief iterator parser to parse text format * \author Tianqi Chen */ #ifndef DMLC_DATA_TEXT_PARSER_H_ #define DMLC_DATA_TEXT_PARSER_H_ #include <dmlc/data.h> #include <dmlc/omp.h> #include <vector> #include <cstring> #include <algorithm> #include "./row_block.h" #include "./parser.h" namespace dmlc { namespace data { /*! * \brief Text parser that parses the input lines * and returns rows in input data */ template <typename IndexType> class TextParserBase : public ParserImpl<IndexType> { public: explicit TextParserBase(InputSplit *source, int nthread) : bytes_read_(0), source_(source) { int maxthread = std::max(omp_get_num_procs() / 2 - 4, 1); nthread_ = std::min(maxthread, nthread); } virtual ~TextParserBase() { delete source_; } virtual void BeforeFirst(void) { source_->BeforeFirst(); } virtual size_t BytesRead(void) const { return bytes_read_; } virtual bool ParseNext(std::vector<RowBlockContainer<IndexType> > *data) { return FillData(data); } protected: /*! * \brief parse data into out * \param begin beginning of buffer * \param end end of buffer */ virtual void ParseBlock(const char *begin, const char *end, RowBlockContainer<IndexType> *out) = 0; /*! * \brief read in next several blocks of data * \param data vector of data to be returned * \return true if the data is loaded, false if reach end */ inline bool FillData(std::vector<RowBlockContainer<IndexType>> *data); /*! * \brief start from bptr, go backward and find first endof line * \param bptr end position to go backward * \param begin the beginning position of buffer * \return position of first endof line going backward, returns begin if not found */ static inline const char *BackFindEndLine(const char *bptr, const char *begin) { for (; bptr != begin; --bptr) { if (*bptr == '\n' || *bptr == '\r') return bptr; } return begin; } /*! 
* \brief Ignore UTF-8 BOM if present * \param begin reference to begin pointer * \param end reference to end pointer */ static inline void IgnoreUTF8BOM(const char **begin, const char **end) { int count = 0; for (count = 0; *begin != *end && count < 3; count++, ++*begin) { if (!begin || !*begin) break; if (**begin != '\xEF' && count == 0) break; if (**begin != '\xBB' && count == 1) break; if (**begin != '\xBF' && count == 2) break; } if (count < 3) *begin -= count; } private: // nthread int nthread_; // number of bytes readed size_t bytes_read_; // source split that provides the data InputSplit *source_; // exception_ptr to hold exception thrown in OMP threads std::exception_ptr parser_exception_; // mutex for the exception_ptr std::mutex mutex_exception_; }; // implementation template <typename IndexType> inline bool TextParserBase<IndexType>::FillData( std::vector<RowBlockContainer<IndexType> > *data) { InputSplit::Blob chunk; if (!source_->NextChunk(&chunk)) return false; const int nthread = omp_get_max_threads(); // reserve space for data data->resize(nthread); bytes_read_ += chunk.size; CHECK_NE(chunk.size, 0U); const char *head = reinterpret_cast<char *>(chunk.dptr); #pragma omp parallel num_threads(nthread) { try { // threadid int tid = omp_get_thread_num(); size_t nstep = (chunk.size + nthread - 1) / nthread; size_t sbegin = std::min(tid * nstep, chunk.size); size_t send = std::min((tid + 1) * nstep, chunk.size); const char *pbegin = BackFindEndLine(head + sbegin, head); const char *pend; if (tid + 1 == nthread) { pend = head + send; } else { pend = BackFindEndLine(head + send, head); } ParseBlock(pbegin, pend, &(*data)[tid]); } catch (dmlc::Error& ex) { { std::lock_guard<std::mutex> lock(mutex_exception_); if (!parser_exception_) { parser_exception_ = std::current_exception(); } } } } if (parser_exception_) { std::rethrow_exception(parser_exception_); } this->data_ptr_ = 0; return true; } } // namespace data } // namespace dmlc #endif // 
DMLC_DATA_TEXT_PARSER_H_
GB_binop__bget_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__bget_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint8) // C=scalar+B GB (_bind1st__bget_uint8) // C=scalar+B' GB (_bind1st_tran__bget_uint8) // C=A+scalar GB (_bind2nd__bget_uint8) // C=A'+scalar GB (_bind2nd_tran__bget_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = GB_BITGET (aij, bij, uint8_t, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, uint8_t, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT8 || GxB_NO_BGET_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bget_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict 
Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bget_uint8) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bget_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, uint8_t, 8) ; \ } GrB_Info GB (_bind1st_tran__bget_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint8_t, 8) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
lock-unrelated.c
/* * lock-unrelated.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; omp_lock_t lock; omp_init_lock(&lock); #pragma omp parallel num_threads(2) shared(var) { omp_set_lock(&lock); // Dummy locking. omp_unset_lock(&lock); var++; } omp_destroy_lock(&lock); int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}lock-unrelated.c:31 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}lock-unrelated.c:31 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
rsvp_fmt_plug.c
/*
 * Cracker for HMAC-MD5 and HMAC-SHA1 based authentication in RSVP.
 *
 * This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rsvp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rsvp);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 4096
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#endif // __MIC__
#endif

#include "arch.h"
#include "md5.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL "rsvp"
#define FORMAT_NAME "HMAC-MD5 / HMAC-SHA1, RSVP, IS-IS"
#define FORMAT_TAG "$rsvp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
// Only the first BINARY_SIZE (16) bytes of a digest are compared, even for
// SHA1 hashes (20 bytes); see the crypt_out sizing note further down.
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define HEXCHARS "0123456789abcdef"
// Upper bound on the raw (decoded) salt, i.e. the packet bytes that get MAC'd.
#define MAX_SALT_SIZE 8192
// Currently only 2 types: 1 for MD5 and 2 for SHA1. Bump this
// number each time a type is added, and make sure the types
// are sequential.
#define MAX_TYPES 2 static struct fmt_tests tests[] = { {"$rsvp$1$10010000ff0000ac002404010100000000000001d7e95bfa0000003a00000000000000000000000000000000000c0101c0a8011406000017000c0301c0a8010a020004020008050100007530000c0b01c0a8010a0000000000240c0200000007010000067f00000545fa000046fa000045fa0000000000007fffffff00300d020000000a010000080400000100000001060000014998968008000001000000000a000001000005dc05000000$636d8e6db5351fbc9dad620c5ec16c0b", "password12345"}, {"$rsvp$2$10010000ff0000b0002804010100000000000001d7e95bfa0000055d0000000000000000000000000000000000000000000c0101c0a8011406000017000c0301c0a8010a020004020008050100007530000c0b01c0a8010a0000000000240c0200000007010000067f00000545fa000046fa000045fa0000000000007fffffff00300d020000000a010000080400000100000001060000014998968008000001000000000a000001000005dc05000000$ab63f157e601742983b853f13a63bc4d4379a434", "JtR_kicks_ass"}, // IS-IS HMAC-MD5 hash {"$rsvp$1$831b01000f01000001192168001005001e05d940192168001005010a1136000000000000000000000000000000008101cc0104034900018404c0a87805d30300000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000890000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000$ae116a4cff88a4b13b3ae14bf169ff5c", "password12345"}, // IS-IS HMAC-MD5 hash 
{"$rsvp$1$831b01000f01000001192168001005001e05d940192168001005010a1136000000000000000000000000000000008101cc0104034900018404c0a87805d30300000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008ff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000890000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000$5048a1fe4ed87c32bc6c4af43095cae4", "1234567890"}, // IS-IS HMAC-MD5, isis-hmac-md5_key-1234.pcap {"$rsvp$1$831401001101000301192168201101001b005a000104034900018102cc8ee50400000002e810fe800000000000000465fffffe000000f00f0000000004192168201104000000040a113600000000000000000000000000000000$44b62860b363f9adf60acdb9d66abe27", "1234"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *saved_len; // when we add more types, they need to be sequential (next will be 3), // AND we need to bump this to the count. Each type will use one of these // to track whether it has build the first half of the hmac. The size // of this array should be 1 more than the max number of types. static int new_keys[MAX_TYPES+1]; // we make our crypt_out large enough for an SHA1 output now. Even though // we only compare first BINARY_SIZE data. 
static uint32_t (*crypt_out)[ (BINARY_SIZE+4) / sizeof(uint32_t)]; static SHA_CTX *ipad_ctx; static SHA_CTX *opad_ctx; static MD5_CTX *ipad_mctx; static MD5_CTX *opad_mctx; static struct custom_salt { int type; int salt_length; unsigned char salt[MAX_SALT_SIZE]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx)); opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx)); ipad_mctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_mctx)); opad_mctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_mctx)); } static void done(void) { MEM_FREE(opad_mctx); MEM_FREE(ipad_mctx); MEM_FREE(opad_ctx); MEM_FREE(ipad_ctx); MEM_FREE(crypt_out); MEM_FREE(saved_len); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *strkeep; int version; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return 0; strkeep = strdup(ciphertext); p = &strkeep[TAG_LENGTH]; if ((p = strtokm(p, "$")) == NULL) /* version */ goto err; version = atoi(p); if (version != 1 && version != 2) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (strlen(p) >= MAX_SALT_SIZE*2) goto err; if (!ishexlc(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* hash */ goto err; /* there is code that trim longer binary values, so we do not need to check for extra long */ if (strlen(p) < BINARY_SIZE*2) goto err; if (!ishexlc(p)) goto err; MEM_FREE(strkeep); return 1; err:; MEM_FREE(strkeep); return 0; } static void *get_salt(char *ciphertext) { static struct 
custom_salt cs; int i; char *p, *q; memset(&cs, 0, SALT_SIZE); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; p = ciphertext; cs.type = atoi(p); p = p + 2; q = strchr(p, '$') + 1; cs.salt_length = (q - p) / 2; for (i = 0; i < cs.salt_length; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; return (void*)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { unsigned char buf[20]; if (cur_salt->type == 1) { MD5_CTX ctx; if (new_keys[cur_salt->type]) { int i, len = strlen(saved_key[index]); unsigned char *p = (unsigned char*)saved_key[index]; unsigned char pad[64]; if (len > 64) { MD5_Init(&ctx); MD5_Update(&ctx, p, len); MD5_Final(buf, &ctx); len = 16; p = buf; } for (i = 0; i < len; ++i) { pad[i] = p[i] ^ 0x36; } MD5_Init(&ipad_mctx[index]); MD5_Update(&ipad_mctx[index], pad, len); if (len < 64) MD5_Update(&ipad_mctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 64-len); for (i = 0; i < len; ++i) { pad[i] = p[i] ^ 0x5C; } MD5_Init(&opad_mctx[index]); MD5_Update(&opad_mctx[index], pad, len); if (len < 64) MD5_Update(&opad_mctx[index], 
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 64-len); } memcpy(&ctx, &ipad_mctx[index], sizeof(ctx)); MD5_Update(&ctx, cur_salt->salt, cur_salt->salt_length); MD5_Final(buf, &ctx); memcpy(&ctx, &opad_mctx[index], sizeof(ctx)); MD5_Update(&ctx, buf, 16); MD5_Final((unsigned char*)(crypt_out[index]), &ctx); } else if (cur_salt->type == 2) { SHA_CTX ctx; if (new_keys[cur_salt->type]) { int i, len = strlen(saved_key[index]); unsigned char *p = (unsigned char*)saved_key[index]; unsigned char pad[64]; if (len > 64) { SHA1_Init(&ctx); SHA1_Update(&ctx, p, len); SHA1_Final(buf, &ctx); len = 20; p = buf; } for (i = 0; i < len; ++i) { pad[i] = p[i] ^ 0x36; } SHA1_Init(&ipad_ctx[index]); SHA1_Update(&ipad_ctx[index], pad, len); if (len < 64) SHA1_Update(&ipad_ctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 64-len); for (i = 0; i < len; ++i) { pad[i] = p[i] ^ 0x5C; } SHA1_Init(&opad_ctx[index]); SHA1_Update(&opad_ctx[index], pad, len); if (len < 64) SHA1_Update(&opad_ctx[index], "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 64-len); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); SHA1_Update(&ctx, cur_salt->salt, cur_salt->salt_length); SHA1_Final(buf, &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); SHA1_Update(&ctx, buf, 20); // NOTE, this writes 20 bytes. 
// That is why we had to bump up the size of each crypt_out[] value,
// even though we only look at the first 16 bytes when comparing the
// saved binary.
			SHA1_Final((unsigned char*)(crypt_out[index]), &ctx);
		}
	}
	// Pad states for this hash type are now cached; keep them until
	// the next set_key()/clear_keys() marks them stale again.
	new_keys[cur_salt->type] = 0;
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;
	/* Without OpenMP, MAX_KEYS_PER_CRYPT is 1, so only index 0 exists
	   and the loop header is compiled out on purpose. */
#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (((uint32_t*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}

/* Full (first BINARY_SIZE bytes) comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void rsvp_set_key(char *key, int index)
{
	/* NOTE(review): strncpy() does not NUL-terminate when key is exactly
	   sizeof(saved_key[0]) bytes; callers are presumably limited to
	   PLAINTEXT_LENGTH so the terminator always fits -- confirm. */
	saved_len[index] = strlen(key);
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
	// Workaround for self-test code not working as IRL
	new_keys[1] = new_keys[2] = 2;
}

/* Invalidate all cached HMAC pad states (one slot per hash type). */
static void clear_keys(void) {
	int i;
	for (i = 0; i <= MAX_TYPES; ++i)
		new_keys[i] = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * report hash algorithm used for hmac as "tunable cost"
 */
static unsigned int rsvp_hash_type(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->type;
}

/* Format descriptor wiring the callbacks above into the JtR core. */
struct fmt_main fmt_rsvp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"hash algorithm used for hmac [1:MD5 2:SHA1]"
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			rsvp_hash_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		rsvp_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
vlisa_precoloring.c
/* ** Implementation of LISA algorithm ** for statistical inference of fMRI images ** ** 1st level analysis using GLM (general linear model) with pre-coloring ** ** G.Lohmann, April 2017 */ #include "viaio/Vlib.h" #include "viaio/file.h" #include "viaio/mu.h" #include "viaio/option.h" #include "viaio/os.h" #include <viaio/VImage.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_cdf.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_histogram.h> #include <gsl/gsl_permutation.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /*_OPENMP*/ typedef struct TrialStruct { int id; float onset; float duration; float height; } Trial; extern void VIsolatedVoxels(VImage src,float threshold); extern void VHistogram(gsl_histogram *histogram,VString filename); extern void VCheckImage(VImage src); extern void FDR(VImage src,VImage dest,gsl_histogram *nullhist,gsl_histogram *realhist,double); extern double ttest1(double *data1,int n); extern void ImageStats(VImage src,double *,double *,double *hmin,double *hmax); extern Trial *ReadDesign(VStringConst designfile,int *numtrials,int *nevents); extern gsl_matrix *VCreateDesign(int ntimesteps,int nevents,int,VBoolean,gsl_matrix *); extern void VHemoModel(Trial *trial,int ntrials,int nevents,int ntimesteps,double tr,int,VBoolean,gsl_matrix *,gsl_matrix *); extern Trial *CopyTrials(Trial *trial,int numtrials); extern void VGLM(gsl_matrix *Data,gsl_matrix *X,gsl_matrix *XInv,gsl_vector *con,VImage map,VImage zmap); extern Trial *ConcatenateTrials(Trial **trial,int *numtrials,float *run_duration,int dlists,int sumtrials); extern double VImageVar(VImage src); extern void VImageCount(VImage src); extern void VBilateralFilter(VImage src,VImage,int radius,double var1,double var2,int); extern void VGetHistRange(VImage src,double *hmin,double *hmax); extern void VZScale(VImage 
src,float,float stddev); extern float VGetMode(VImage src); extern void GlobalMean(gsl_matrix *Data,gsl_matrix *covariates,int column); extern gsl_matrix *VReadCovariates(VString cfile,VBoolean normalize); extern VImage VoxelMap(VAttrList list); extern gsl_matrix *VReadImageData(VAttrList *list,int nlists); extern void VGetTimeInfos(VAttrList *list,int nlists,double *mtr,float *run_duration); extern void VRowNormalize(gsl_matrix *Data); extern void CheckTrialLabels(Trial *trial,int numtrials); extern void HistoUpdate(VImage,gsl_histogram *); extern void PlotDesign(gsl_matrix *X,double tr,VString filename); void XCheckImage(VImage src,char *filename) { VAttrList out_list = VCreateAttrList(); VAppendAttr(out_list,"image",NULL,VImageRepn,src); FILE *out_file = fopen(filename,"w"); VWriteFile (out_file, out_list); } /* shuffle each run separately to ensure exchangebility, concatenate individual permtables */ int **genperm(gsl_rng *rx,int *numtrials,int sumtrials,int dlists,int numperm) { int i,j,k; int **permtable = (int **) VCalloc(numperm,sizeof(int *)); gsl_permutation **perm = (gsl_permutation **) VCalloc(dlists,sizeof(gsl_permutation *)); for (k=0; k<dlists; k++) { perm[k] = gsl_permutation_alloc((size_t)numtrials[k]); gsl_permutation_init (perm[k]); } for (i = 0; i < numperm; i++) { permtable[i] = (int *) VCalloc(sumtrials,sizeof(int)); int jj=0; for (k=0; k<dlists; k++) { gsl_ran_shuffle (rx, perm[k]->data,numtrials[k],sizeof(size_t)); for (j=0; j<numtrials[k]; j++) { permtable[i][j+jj] = perm[k]->data[j] + jj; } jj += numtrials[k]; } } for (k=0; k<dlists; k++) { gsl_permutation_free(perm[k]); } return permtable; } VDictEntry HemoDict[] = { { "gamma_0", 0 }, { "gamma_1", 1 }, { "gamma_2", 2 }, { "gauss", 3 }, { NULL } }; int main (int argc, char *argv[]) { static VArgVector in_files; static VArgVector des_files; static VString cova_filename=""; static VString out_filename=""; static VString plot_filename=""; static VString mask_filename=""; static VShort 
hemomodel = 0; static VBoolean firstcol = TRUE; static VArgVector contrast; static VFloat alpha = 0.05; static VShort radius = 2; static VFloat rvar = 2.0; static VFloat svar = 2.0; static VShort numiter = 2; static VBoolean cleanup = TRUE; static VBoolean demean = TRUE; static VBoolean verbose = FALSE; static VBoolean globalmean = FALSE; static VShort numperm = 5000; static VLong seed = 99402622; static VShort nproc = 0; static VOptionDescRec options[] = { {"in", VStringRepn, 0, & in_files, VRequiredOpt, NULL,"Input files" }, {"out", VStringRepn, 1, & out_filename, VRequiredOpt, NULL,"Output file" }, {"design", VStringRepn, 0, & des_files, VRequiredOpt, NULL,"Design files (1st level)" }, {"contrast", VFloatRepn, 0, (VPointer) &contrast, VRequiredOpt, NULL, "Contrast vector"}, {"nuisance", VStringRepn, 1, & cova_filename, VOptionalOpt, NULL,"Nuisance regressors" }, {"demean",VBooleanRepn,1,(VPointer) &demean,VOptionalOpt,NULL,"Whether to subtract mean in nuisance regressors"}, {"hemo", VShortRepn, 1, (VPointer) &hemomodel, VOptionalOpt, HemoDict,"Hemodynamic model" }, {"col1", VBooleanRepn, 1, (VPointer) &firstcol, VOptionalOpt, NULL,"Whether to add a constant first column" }, {"alpha",VFloatRepn,1,(VPointer) &alpha,VOptionalOpt,NULL,"FDR significance level"}, {"perm",VShortRepn,1,(VPointer) &numperm,VOptionalOpt,NULL,"Number of permutations"}, {"seed",VLongRepn,1,(VPointer) &seed,VOptionalOpt,NULL,"Seed for random number generation"}, {"plotdesign", VStringRepn, 1, & plot_filename, VOptionalOpt, NULL,"Filename for plotting design matrix X" }, {"radius",VShortRepn,1,(VPointer) &radius,VOptionalOpt,NULL,"Bilateral parameter (radius in voxels)"}, {"rvar",VFloatRepn,1,(VPointer) &rvar,VOptionalOpt,NULL,"Bilateral parameter (radiometric)"}, {"svar",VFloatRepn,1,(VPointer) &svar,VOptionalOpt,NULL,"Bilateral parameter (spatial)"}, {"filteriterations",VShortRepn,1,(VPointer) &numiter,VOptionalOpt,NULL,"Bilateral parameter (number of iterations)"}, 
{"cleanup",VBooleanRepn,1,(VPointer) &cleanup,VOptionalOpt,NULL,"Whether to remove isloated voxels"}, {"mask", VStringRepn, 1, (VPointer) &mask_filename, VRequiredOpt, NULL, "Mask"}, {"j",VShortRepn,1,(VPointer) &nproc,VOptionalOpt,NULL,"number of processors to use, '0' to use all"}, }; FILE *fp=NULL; VString in_filename; VAttrList out_list=NULL,geolist=NULL; int i; char *prg_name=GetLipsiaName("vlisa_precoloring"); fprintf (stderr, "%s\n", prg_name); /* parse command line */ if (! VParseCommand (VNumber (options), options, & argc, argv)) { VReportUsage (argv[0], VNumber (options), options, NULL); exit (EXIT_FAILURE); } if (argc > 1) { VReportBadArgs (argc, argv); exit (EXIT_FAILURE); } /* omp-stuff */ #ifdef _OPENMP int num_procs=omp_get_num_procs(); if (nproc > 0 && nproc < num_procs) num_procs = nproc; fprintf(stderr," using %d cores\n",(int)num_procs); omp_set_num_threads(num_procs); #endif /* _OPENMP */ /* read functional image data */ int nlists = in_files.number; if (nlists < 1) VError(" no input"); VAttrList *list = (VAttrList *) VCalloc(nlists,sizeof(VAttrList)); for (i=0; i<nlists; i++) { in_filename = ((VString *) in_files.vector)[i]; fprintf(stderr," %3d: %s\n",i,in_filename); list[i] = VReadAttrList(in_filename,0L,TRUE,FALSE); if (geolist == NULL) { geolist = VGetGeoInfo(list[i]); double *DGeo = VGetGeoDim(geolist,NULL); if (fabs(DGeo[0]-4.0) > 0.01) VError(" Input files must be 4D (not 3D)"); } } /* get number og design files */ int dlists = des_files.number; if (dlists != nlists) { VError(" number of input functional files (%d) and design files (%d) do not match",nlists,dlists); } /* apply brain mask or threshold */ VMultMinval(list,nlists,mask_filename,0.0); /* read data and voxel map */ double tr=0; float *run_duration = (float *) VCalloc(nlists,sizeof(float)); VGetTimeInfos(list,nlists,&tr,run_duration); gsl_matrix *Data = VReadImageData(list,nlists); VImage map = VoxelMap(list[0]); int nslices = VPixel(map,0,3,0,VShort); int nrows = 
VPixel(map,0,3,1,VShort); int ncols = VPixel(map,0,3,2,VShort); int ntimesteps = Data->size2; /* additional regressors, no task labels, not included in permutations */ gsl_matrix *ctmp1=NULL; gsl_matrix *ctmp2=NULL; gsl_matrix *covariates=NULL; int cdim = 1; int nuisance_dim=0; if (strlen(cova_filename) > 1) { ctmp1 = VReadCovariates(cova_filename,demean); if (ctmp1->size1 != Data->size2) VError(" num timesteps in covariate file not consistent with data"); nuisance_dim = ctmp1->size2; } if (globalmean) { if (ctmp1 != NULL) cdim = ctmp1->size2+1; ctmp2 = gsl_matrix_calloc(Data->size2,cdim); GlobalMean(Data,ctmp2,(int)(cdim-1)); } if (ctmp1 != NULL && ctmp2 == NULL) covariates = ctmp1; if (ctmp2 != NULL) covariates = ctmp2; /* design files with task labels */ Trial **trial = (Trial **) VCalloc(dlists,sizeof(Trial *)); int *numtrials = (int *) VCalloc(dlists,sizeof(int *)); int nevents = 0; int sumtrials = 0; for (i=0; i<dlists; i++) { in_filename = ((VString *) des_files.vector)[i]; fprintf(stderr," %3d: %s\n",i,in_filename); int kk=0,jj=0; trial[i] = ReadDesign(in_filename,&kk,&jj); numtrials[i] = kk; if (jj > nevents) nevents = jj; sumtrials += numtrials[i]; } fprintf(stderr," Number of trials: %d, number of event types: %d\n",sumtrials,nevents); Trial *alltrials = ConcatenateTrials(trial,numtrials,run_duration,nlists,sumtrials); CheckTrialLabels(alltrials,sumtrials); /* read contrast vector */ gsl_vector *cont = gsl_vector_alloc(contrast.number + nuisance_dim); gsl_vector_set_zero(cont); for (i=0; i < contrast.number; i++) { double u = ((VFloat *)contrast.vector)[i]; gsl_vector_set(cont,i,u); } /* alloc initial design matrix X */ gsl_matrix *X = VCreateDesign(ntimesteps,nevents,(int)hemomodel,firstcol,covariates); fprintf(stderr," Design file dimensions: %lu x %lu\n",X->size1,X->size2); gsl_matrix *XInv = gsl_matrix_calloc(X->size2,X->size1); if (X->size2 != cont->size) { VError(" dimension of contrast vector (%ld) does not match design matrix 
(%ld)",cont->size-1,X->size2); } /* ini random permutations */ gsl_rng_env_setup(); const gsl_rng_type *T = gsl_rng_default; gsl_rng *rx = gsl_rng_alloc(T); gsl_rng_set(rx,(unsigned long int)seed); if (verbose) fprintf(stderr," seed: %ld\n",(long)seed); int **permtable = genperm(rx,numtrials,sumtrials,dlists,(int)numperm); /* estimate null variance to adjust radiometric parameter, use first 30 permutations */ int nperm=0; float stddev = 1.0; double meanvar = 0.0; if (numperm > 0) { int tstperm = 30; if (tstperm > numperm) tstperm = numperm; VImage zmap = VCreateImage(nslices,nrows,ncols,VFloatRepn); double varsum=0,nx=0; for (nperm = 0; nperm < tstperm; nperm++) { Trial *permtrials = CopyTrials(alltrials,sumtrials); int j=0; for (j=0; j<sumtrials; j++) { int j0 = permtable[nperm][j]; permtrials[j].id = alltrials[j0].id; } gsl_matrix *X = VCreateDesign(ntimesteps,nevents,(int)hemomodel,firstcol,covariates); gsl_matrix *XInv = gsl_matrix_calloc(X->size2,X->size1); VHemoModel(permtrials,sumtrials,nevents,ntimesteps,tr,(int)hemomodel,firstcol,X,covariates); VGLM(Data,X,XInv,cont,map,zmap); varsum += VImageVar(zmap); nx++; gsl_matrix_free(X); gsl_matrix_free(XInv); VFree(permtrials); } meanvar = varsum/nx; stddev = (float)(sqrt(meanvar)); /* update stddev */ VDestroyImage(zmap); } /* no permutation */ VImage zmap1 = VCreateImage(nslices,nrows,ncols,VFloatRepn); VCopyImageAttrs (map,zmap1); VImage dst1 = VCreateImageLike (zmap1); VHemoModel(alltrials,sumtrials,nevents,ntimesteps,tr,(int)hemomodel,firstcol,X,covariates); if (strlen(plot_filename) > 0) PlotDesign(X,tr,plot_filename); VGLM(Data,X,XInv,cont,map,zmap1); if (numperm == 0) { double z = VImageVar(zmap1); stddev = sqrt(z); /* update stddev */ } float mode=0; if (numperm > 0) VZScale(zmap1,mode,stddev); VBilateralFilter(zmap1,dst1,(int)radius,(double)rvar,(double)svar,(int)numiter); /* ini histograms */ double hmin=0,hmax=0; VGetHistRange(dst1,&hmin,&hmax); size_t nbins = 20000; gsl_histogram *hist0 = 
gsl_histogram_alloc (nbins); gsl_histogram_set_ranges_uniform (hist0,hmin,hmax); gsl_histogram *histz = gsl_histogram_alloc (nbins); gsl_histogram_set_ranges_uniform (histz,hmin,hmax); HistoUpdate(dst1,histz); /* random permutations */ #pragma omp parallel for shared(Data) schedule(dynamic) for (nperm = 0; nperm < numperm; nperm++) { if (nperm%5 == 0) fprintf(stderr," perm %4d of %d\r",nperm,(int)numperm); /* randomly shuffle trial labels */ Trial *permtrials = CopyTrials(alltrials,sumtrials); int j=0; for (j=0; j<sumtrials; j++) { int j0 = permtable[nperm][j]; permtrials[j].id = alltrials[j0].id; } /* hemodynamic model */ gsl_matrix *X = VCreateDesign(ntimesteps,nevents,(int)hemomodel,firstcol,covariates); gsl_matrix *XInv = gsl_matrix_calloc(X->size2,X->size1); VHemoModel(permtrials,sumtrials,nevents,ntimesteps,tr,(int)hemomodel,firstcol,X,covariates); /* GLM */ VImage zmap = VCreateImageLike(zmap1); VGLM(Data,X,XInv,cont,map,zmap); VZScale(zmap,mode,stddev); gsl_matrix_free(X); gsl_matrix_free(XInv); VFree(permtrials); /* bilateral filter */ VImage dst = VCreateImageLike (zmap); VBilateralFilter(zmap,dst,(int)radius,(double)rvar,(double)svar,(int)numiter); #pragma omp critical { HistoUpdate(dst,hist0); } VDestroyImage(dst); VDestroyImage(zmap); } /* apply fdr */ VImage fdrimage = VCopyImage (dst1,NULL,VAllBands); if (numperm > 0) { FDR(dst1,fdrimage,hist0,histz,(double)alpha); if (cleanup && alpha < 1.0) { VIsolatedVoxels(fdrimage,(float)(1.0-alpha)); } } /* ** output */ out_list = VCreateAttrList (); VHistory(VNumber(options),options,prg_name,&list[0],&out_list); /* update geoinfo, 4D to 3D */ if (geolist != NULL) { double *D = VGetGeoDim(geolist,NULL); D[0] = 3; D[4] = 1; VSetGeoDim(geolist,D); } VSetGeoInfo(geolist,out_list); VAppendAttr (out_list,"image",NULL,VImageRepn,fdrimage); fp = VOpenOutputFile (out_filename, TRUE); if (! VWriteFile (fp, out_list)) exit (1); fclose(fp); fprintf (stderr, "\n%s: done.\n", argv[0]); exit(0); }
hcb_basis_core.h
#ifndef _HCB_BASIS_CORE_H
#define _HCB_BASIS_CORE_H

#include <complex>
#include <vector>
#include <iostream>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "benes_perm.h"
#include "openmp.h"

namespace basis_general {

// Apply a site-permutation "map" to the bit string s (one bit per site,
// site j stored at bit N-j-1).  A negative map entry -k means the bit is
// sent to site k-? position N+j AND flipped (note the (s&1)^1), i.e. the
// symmetry includes a spin/occupation flip on that site.
template<class I>
I inline hcb_map_bits(I s,const int map[],const int N){
	I ss = 0;
	for(int i=N-1;i>=0;--i){
		int j = map[i];
		ss ^= (j<0 ? ((s&1)^1)<<(N+j) : (s&1)<<(N-j-1) );
		s >>= 1;
	}
	return ss;
}

// Basis core for hard-core-boson (two states per site) bit-string bases.
// Each lattice symmetry (one of the _nt transformations stored in
// general_basis_core::maps) is compiled once, in the constructor, into a
// Benes permutation network (benes_maps[i]) plus a bit-flip mask
// (invs[i]); applying the symmetry is then s -> benes_bwd(s ^ inv).
// I : unsigned integer type holding the bit string;
// P : sign type (unused here -- hard-core bosons carry no permutation
//     sign; presumably the fermionic sign is handled in a derived/other
//     core -- TODO confirm).
template<class I,class P=signed char>
class hcb_basis_core : public general_basis_core<I,P>
{
	public:
		std::vector<tr_benes<I>> benes_maps;  // one compiled network per symmetry
		std::vector<I> invs;                  // per-symmetry flip mask (bits with negative map entries)

		// No-symmetry constructor: benes_maps/invs stay empty.
		hcb_basis_core(const int _N, const bool _fermionic=false) : \
		general_basis_core<I>::general_basis_core(_N,_fermionic) {}

		// Symmetry constructor: compile each of the _nt maps into a Benes
		// network + inversion mask.
		hcb_basis_core(const int _N,const int _nt,const int _maps[], \
					   const int _pers[], const int _qs[], const bool _fermionic=false) : \
		general_basis_core<I>::general_basis_core(_N,_nt,_maps,_pers,_qs,_fermionic) {
			benes_maps.resize(_nt);
			invs.resize(_nt);
			ta_index<I> index;
			// start with all bit positions unmapped
			for(int j=0;j<bit_info<I>::bits;j++){index.data[j] = no_index;}

			for(int i=0;i<_nt;i++){
				const int * map = &general_basis_core<I,P>::maps[i*_N];
				I inv = 0;
				for(int j=0;j<_N;j++){
					int m = map[j];
					int bit_j = _N - j - 1; // bit position of site j
					if(m<0){
						// negative entry: permute AND flip this bit
						int bit_m = _N + m;
						index.data[bit_j] = bit_m;
						inv ^= ((I)1 << bit_j);
					}
					else{
						int bit_m = _N - m -1;
						index.data[bit_j] = bit_m;
					}
				}
				gen_benes<I>(&benes_maps[i],index);
				invs[i] = inv;
			}
		}

		~hcb_basis_core() {}

		// Leading N_p bits of s, used as a bucket/prefix key.
		npy_intp get_prefix(const I s,const int N_p){
			return integer_cast<npy_intp,I>(s >> (general_basis_core<I,P>::N - N_p));
		}

		// Apply symmetry n_map to a single state. `sign` is left untouched
		// (no fermion sign for hard-core bosons).
		I map_state(I s,int n_map,P &sign){
			if(general_basis_core<I,P>::nt<=0){
				return s;
			}
			return benes_bwd(&benes_maps[n_map],s^invs[n_map]);
		}

		// Vectorized in-place version; expected to be called from inside an
		// existing OpenMP parallel region (work-sharing `omp for`, no `parallel`).
		void map_state(I s[],npy_intp M,int n_map,P sign[]){
			if(general_basis_core<I,P>::nt<=0){
				return;
			}
			const tr_benes<I> * benes_map = &benes_maps[n_map];
			const I inv = invs[n_map];
			#pragma omp for schedule(static)
			for(npy_intp i=0;i<M;i++){
				s[i] = benes_bwd(benes_map,s[i]^inv);
			}
		}

		// Single conserved species: total particle number = popcount of s.
		std::vector<int> count_particles(const I s){
			std::vector<int> v(1);
			v[0] = bit_count(s,general_basis_core<I,P>::N);
			return v;
		}

		// Legacy (pre-Benes) implementations kept for reference:
		// I map_state(I s,int n_map,int &sign){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return s;
		// 	}
		// 	const int n = general_basis_core<I,P>::N;
		// 	return hcb_map_bits(s,&general_basis_core<I,P>::maps[n_map*n],n);
		// }
		// void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return;
		// 	}
		// 	const int n = general_basis_core<I,P>::N;
		// 	const int * map = &general_basis_core<I,P>::maps[n_map*n];
		// 	#pragma omp for schedule(static,1)
		// 	for(npy_intp i=0;i<M;i++){
		// 		s[i] = hcb_map_bits(s[i],map,n);
		// 	}
		// }

		// Next bit string with the same popcount (Gosper's hack / "next bit
		// permutation"); enumerates states at fixed particle number.
		// (0-t) is the two's-complement negation spelled without unary minus
		// to stay warning-free for unsigned I.  nns is unused here.
		I inline next_state_pcon(const I s,const I nns){
			if(s==0){return s;}
			I t = (s | (s - 1)) + 1;
			return t | ((((t & (0-t)) / (s & (0-s))) >> 1) - 1);
		}

		// Apply an operator string (read right-to-left) to state r, and
		// accumulate the matrix element in m.  Supported single-site ops:
		//   'z' : +-1/2 diag    'n' : occupation 0/1   'x' : flip, factor 1/2
		//   'y' : flip, +-i/2   '+','-' : raise/lower  'I' : identity
		// Returns 0 on success, -1 on an unknown op character.  If the matrix
		// element vanishes, r is restored to the input state and we stop early.
		int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
			const I s = r;
			const I one = 1;
			const int NN = general_basis_core<I,P>::N;

			for(int j=n_op-1;j>-1;j--){
				const int ind = NN-indx[j]-1;   // bit position of the acted site
				const I b = (one << ind);
				const bool a = (bool)((r >> ind)&one);  // site occupied?
				const char op = opstr[j];
				switch(op){
					case 'z':
						m *= (a?0.5:-0.5);
						break;
					case 'n':
						m *= (a?1:0);
						break;
					case 'x':
						r ^= b;
						m *= 0.5;
						break;
					case 'y':
						m *= (a?std::complex<double>(0,0.5):std::complex<double>(0,-0.5));
						r ^= b;
						break;
					case '+':
						m *= (a?0:1);  // vanishes if already occupied
						r ^= b;
						break;
					case '-':
						m *= (a?1:0);  // vanishes if already empty
						r ^= b;
						break;
					case 'I':
						break;
					default:
						return -1;
				}

				if(m.real()==0 && m.imag()==0){
					r = s;
					break;
				}
			}

			return 0;
		}
};

}
#endif
omp_mm.c
/****************************************************************************** * FILE: omp_mm.c * DESCRIPTION: * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. Threads share row iterations * according to a predefined chunk size. * AUTHOR: Blaise Barney * LAST REVISED: 06/28/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #define NRA 862 /* number of rows in matrix A */ #define NCA 865 /* number of columns in matrix A */ #define NCB 867 /* number of columns in matrix B */ int main (int argc, char *argv[]) { int tid, nthreads, i, j, k, chunk; double a[NRA][NCA], /* matrix A to be multiplied */ b[NCA][NCB], /* matrix B to be multiplied */ c[NRA][NCB]; /* result matrix C */ chunk = 10; /* set loop iteration chunk size */ /*** Spawn a parallel region explicitly scoping all variables ***/ #pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k) { tid = omp_get_thread_num(); if (tid == 0) { nthreads = omp_get_num_threads(); printf("Starting matrix multiple example with %d threads\n",nthreads); printf("Initializing matrices...\n"); } /*** Initialize matrices ***/ #include "omp_helper.h" /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ printf("Thread %d starting matrix multiply...\n",tid); #include "directive.h" for (i=0; i<NRA; i++) { // printf("Thread=%d did row=%d\n",tid,i); for(j=0; j<NCB; j++) for (k=0; k<NCA; k++) c[i][j] += a[i][k] * b[k][j]; } } /*** End of parallel region ***/ printf ("Done.\n"); }
GB_unop__identity_int16_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This instantiation: the IDENTITY operator with a uint8 -> int16 typecast,
// i.e. C = (int16) A, element-wise.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int16_uint8)
// op(A') function:  GB (_unop_tran__identity_int16_uint8)

// C type:   int16_t
// A type:   uint8_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint8_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ;     \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast-and-copy Ax into Cx over anz entries, parallelized statically over
// nthreads.  Returns GrB_NO_VALUE if this kernel is compiled out.

GrB_Info GB (_unop_apply__identity_int16_uint8)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only entries flagged in Ab are converted
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is supplied by GB_unop_transpose.c, which expands
// using the GB_CAST_OP macro defined above.

GrB_Info GB (_unop_tran__identity_int16_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ethereum_fmt_plug.c
/* * JtR format to crack password protected Ethereum Wallets. * * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it * is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ethereum; #elif FMT_REGISTERS_H john_register_one(&fmt_ethereum); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 16 // tuned on i7-6600U #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX 1 // hack, we can't use our simd pbkdf2 code for presale wallets because of varying salt #include "pbkdf2_hmac_sha256.h" #include "ethereum_common.h" #include "escrypt/crypto_scrypt.h" #include "KeccakHash.h" #include "aes.h" #include "jumbo.h" #include "memdbg.h" #define FORMAT_NAME "Ethereum Wallet" #define FORMAT_LABEL "ethereum" #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "PBKDF2-SHA256/scrypt Keccak " SHA256_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA256/scrypt Keccak 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 16 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(*cur_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; custom_salt *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_num_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = 
mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(saved_key); MEM_FREE(crypt_out); } static void set_salt(void *salt) { cur_salt = (custom_salt *)salt; } static void ethereum_set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } static unsigned char *dpad = (unsigned char*)"\x02\x00\x00\x00\x00\x00\x00\x00"; static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char master[MAX_KEYS_PER_CRYPT][32]; int i; if (cur_salt->type == 0) { #ifdef SIMD_COEF_64 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; pout[i] = master[i]; } pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, master[i], 32, 0); #endif } else if (cur_salt->type == 1) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) crypto_scrypt((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->N, cur_salt->r, cur_salt->p, master[i], 32); } else if (cur_salt->type == 2) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), 2000, master[i], 16, 0); } if (cur_salt->type == 0 || cur_salt->type == 1) { for (i = 0; i < MAX_KEYS_PER_CRYPT; 
++i) { Keccak_HashInstance hash; Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01); // delimitedSuffix is 0x06 for SHA-3, and 0x01 for Keccak Keccak_HashUpdate(&hash, master[i] + 16, 16 * 8); Keccak_HashUpdate(&hash, cur_salt->ct, cur_salt->ctlen * 8); Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]); } } else { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { AES_KEY akey; Keccak_HashInstance hash; unsigned char iv[16]; unsigned char seed[4096]; int padbyte; int datalen; AES_set_decrypt_key(master[i], 128, &akey); memcpy(iv, cur_salt->encseed, 16); AES_cbc_encrypt(cur_salt->encseed + 16, seed, cur_salt->eslen - 16, &akey, iv, AES_DECRYPT); if (check_pkcs_pad(seed, cur_salt->eslen - 16, 16) < 0) { memset(crypt_out[index+i], 0, BINARY_SIZE); continue; } padbyte = seed[cur_salt->eslen - 16 - 1]; datalen = cur_salt->eslen - 16 - padbyte; if (datalen < 0) { memset(crypt_out[index+i], 0, BINARY_SIZE); continue; } Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01); Keccak_HashUpdate(&hash, seed, datalen * 8); Keccak_HashUpdate(&hash, dpad, 1 * 8); Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]); } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (((uint32_t*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_ethereum = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { "iteration count", }, { FORMAT_TAG }, ethereum_tests }, { init, done, fmt_default_reset, fmt_default_prepare, ethereum_common_valid, fmt_default_split, ethereum_get_binary, ethereum_common_get_salt, { ethereum_common_iteration_count, }, 
fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, ethereum_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return omp_get_max_threads(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) ret += coord[i] * stride[i]; return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? 
// NOTE(review): this chunk begins mid-definition; the tokens below are the tail
// of a stride-computation helper whose head lies outside this view (it appears
// to fill `stride` from a running `cumprod` product — TODO confirm upstream).
cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates and modify index.
 * Advances `coord` to the next position in row-major order over `shape`,
 * keeping the flattened offset `idx` (under `stride`) in sync so callers
 * avoid a full re-multiplication per step. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  // Bump the innermost coordinate and its contribution to the flat index.
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  // Propagate carries outward: when a dimension overflows, reset it, bump the
  // next-outer coordinate, and correct the flat index by the net stride change.
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index.
 * Same carry-propagating advance as above, but maintains two flat offsets
 * (`idx1`/`idx2`) under two independent stride vectors in one pass. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 *
 * Requires matching element counts and device masks; when dtypes differ,
 * elements are cast via mshadow's `tcast` expression instead of memcpy.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: straight element copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
    } else {
      // Different dtype: per-element cast from SrcDType to DType.
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad  (chain rule: output grad times GRAD_OP's local gradient)
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 *
 * Each overload forwards element i of its tensor inputs to OP::Map and
 * writes the result through KERNEL_ASSIGN, which honors the write-request
 * mode `req` (e.g. overwrite vs accumulate).
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};

// Primary template; specialized per device (cpu below, gpu under __CUDACC__).
template<typename OP, typename xpu>
struct Kernel;

template<typename OP>
struct Kernel<OP, cpu> {
  /*! \brief Launch CPU kernel: calls OP::Map(i, args...) for i in [0, N),
   * parallelized with OpenMP when the recommended thread count is >= 2. */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Zero means not to use OMP, but don't interfere with external OMP behavior
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*! \brief Launch with OP::Map(start, count, args...) chunks: the range is
   * split into per-thread contiguous segments; the last segment is clipped. */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads <= 1) {
      OP::Map(0, N, args...);
    } else {
      int length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
};

#ifdef __CUDACC__
// Grid-stride style device loop: each thread handles indices i, i+stride, ...
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Same loop but calls the chunked OP::Map(start, count, ...) with count == 1.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel on the stream's CUDA stream; grid size is
   * capped at kMaxGridNum. */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
  }

  /*! \brief Chunked-Map variant of Launch (see mxnet_generic_kernel_ex). */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
linAlgWeightedNorm2.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ extern "C" void FUNC(weightedNorm2)(const dlong & Nblocks, const dlong & N, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for reduction(+:wa2) #endif for(int i=0;i<N;++i){ const dfloat ai = cpu_a[i]; const dfloat wi = cpu_w[i]; wa2 += ai*ai*wi; } cpu_wa[0] = wa2; } extern "C" void FUNC(weightedNorm2Many)(const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) reduction(+:wa2) #endif for(int fld=0;fld<Nfields;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i]; wa2 += ai*ai*wi; } } cpu_wa[0] = wa2; }
gi_labeling_to_bounary_labeling.h
/*
 *
 * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu>
 * All rights reserved.
 *
 * This software may be modified and distributed under the terms
 * of the BSD license.  See the LICENSE file for details.
 */

#ifndef VERTEX_LABELING_TO_BOUNDARY_LABELING_H
#define VERTEX_LABELING_TO_BOUNDARY_LABELING_H

#include <omp.h>
#include "gi_basic_types.h"
#include "gi_regular_grid_3d.h"
#include "gi_topological_regular_grid_3d.h"
#include "gi_labeling.h"
#include "gi_array_index_partition.h"

namespace GInt {

  // Converts a per-vertex segmentation label field into a per-cell "boundary"
  // labeling on a topological regular grid: cells that straddle two different
  // vertex labels are marked, and marks are propagated up to higher-dimensional
  // cofaces (edge -> quad -> voxel, or quad -> edge -> vertex in the MAX case).
  // All Compute* methods split the cell range into even per-thread chunks and
  // process each cell dimension in a separate, barrier-separated OpenMP phase.
  template<typename INPUT_LABEL_TYPE, class MaxVLType>
  class VertexLabelingToBoundaryLabeling {
  protected:
    // Per-cell output marks (char-valued). May be caller-owned or allocated
    // by the second constructor.
    // NOTE(review): no destructor is declared; the internally-allocated
    // labeling is never freed here -- presumably ownership passes to the
    // caller via GetOutputLabels(). TODO confirm.
    DenseLabeling<char>* m_output_labels;
    TopologicalRegularGrid3D* m_topological_grid;

  public:
    // Use a caller-supplied output labeling (caller retains ownership).
    VertexLabelingToBoundaryLabeling(TopologicalRegularGrid3D* topological_grid,
                                     DenseLabeling<char>* output_labels) :
      m_topological_grid(topological_grid) {
      m_output_labels = output_labels;
    }

    // Allocate a fresh output labeling sized to the grid's cell count.
    VertexLabelingToBoundaryLabeling(TopologicalRegularGrid3D* topological_grid) :
      m_topological_grid(topological_grid) {
      m_output_labels = new DenseLabeling<char>(m_topological_grid->numCells());
    }

    // Writes the cell IDs of all edges marked with label 1 to a raw binary
    // file (one INDEX_TYPE per marked edge).
    void OutputEdgesToFile(const char* filename) {
      FILE* fout = fopen(filename, "wb");
      TopologicalRegularGrid3D::DCellsIterator edges(m_topological_grid, 1);
      for (edges.begin(); edges.valid(); edges.advance()) {
        INDEX_TYPE edge = edges.value();
        if (m_output_labels->GetLabel(edge) == 1)
          fwrite(&edge, sizeof(INDEX_TYPE), 1, fout);
      }
      fclose(fout);
    }

    DenseLabeling<char>* GetOutputLabels() { return m_output_labels; }

    // Reset all output marks to 0 before a Compute* pass.
    void InitializeFirst() {
      m_output_labels->SetAll(0);
    }

    // Marks (with 1) every edge whose two endpoint vertices carry different
    // input labels (label -1 is treated as "unlabeled" and skipped), then
    // propagates the mark to any quad containing a marked edge and any voxel
    // containing a marked quad.  Returns the output labeling.
    DenseLabeling<char>* ComputeMINBoundary(DenseLabeling<INPUT_LABEL_TYPE>* input_labels) {
#pragma omp parallel
      {
        int num_threads = omp_get_num_threads();
        int thread_num = omp_get_thread_num();

        // Same even split is recomputed by every thread so all threads agree
        // on the chunk boundaries without sharing state.
        std::vector<INDEX_TYPE> partition;
        ArrayIndexPartitioner::EvenChunkSplit(m_topological_grid->numCells(), num_threads, partition);

        // Phase 1: mark edges whose endpoints disagree.
        TopologicalRegularGrid3D::DCellsIterator edges(m_topological_grid, 1,
            partition[thread_num], partition[thread_num + 1]);
        for (edges.begin(); edges.valid(); edges.advance()) {
          TopologicalRegularGrid3D::FacetsIterator vertices(m_topological_grid);
          INDEX_TYPE edge = edges.value();
          vertices.begin(edge);
          INDEX_TYPE vertex1 = vertices.value();
          vertices.advance();
          INDEX_TYPE vertex2 = vertices.value();

          INDEX_TYPE vertex_number1 = m_topological_grid->VertexNumberFromCellID(vertex1);
          INDEX_TYPE vertex_number2 = m_topological_grid->VertexNumberFromCellID(vertex2);

          auto lab1 = input_labels->GetLabel(vertex_number1);
          auto lab2 = input_labels->GetLabel(vertex_number2);
          // -1 means "no label"; such vertices never generate a boundary.
          if (lab1 == -1 || lab2 == -1) continue;
          if (lab1 != lab2) {
            (*m_output_labels)[edge] = 1;
          }
        }

        // Barrier: quads read edge marks written by other threads' chunks.
#pragma omp barrier

        // Phase 2: a quad is a boundary if any of its edges is.
        TopologicalRegularGrid3D::DCellsIterator quads(m_topological_grid, 2,
            partition[thread_num], partition[thread_num + 1]);
        for (quads.begin(); quads.valid(); quads.advance()) {
          INDEX_TYPE quad = quads.value();
          TopologicalRegularGrid3D::FacetsIterator quadedges(m_topological_grid);
          for (quadedges.begin(quad); quadedges.valid(); quadedges.advance()) {
            if ((*m_output_labels)[quadedges.value()] == 1) {
              (*m_output_labels)[quad] = 1;
              break;
            }
          }
        }

#pragma omp barrier

        // Phase 3: a voxel is a boundary if any of its quads is.
        TopologicalRegularGrid3D::DCellsIterator voxels(m_topological_grid, 3,
            partition[thread_num], partition[thread_num + 1]);
        for (voxels.begin(); voxels.valid(); voxels.advance()) {
          INDEX_TYPE voxel = voxels.value();
          TopologicalRegularGrid3D::FacetsIterator voxelquads(m_topological_grid);
          for (voxelquads.begin(voxel); voxelquads.valid(); voxelquads.advance()) {
            if ((*m_output_labels)[voxelquads.value()] == 1) {
              (*m_output_labels)[voxel] = 1;
              break;
            }
          }
        }
      }
      return m_output_labels;
    }

    // Dual ("max") variant: marks interior quads (+= 2) whose two cofacet
    // hexes map -- via maxv_labeling->Cell2HighestVertex -- to vertices with
    // different input labels, then propagates downward to interior edges and
    // vertices touching a marked coface.  Boundary-of-domain cells
    // (boundaryValue != 0) are skipped.  Marks are ADDED (+= 2), so values
    // can combine with a previous ComputeMINBoundary pass on the same output.
    void ComputeMAXBoundary(DenseLabeling<INPUT_LABEL_TYPE>* input_labels, MaxVLType* maxv_labeling) {
#pragma omp parallel
      {
        int num_threads = omp_get_num_threads();
        int thread_num = omp_get_thread_num();
        std::vector<INDEX_TYPE> partition;
        ArrayIndexPartitioner::EvenChunkSplit(m_topological_grid->numCells(), num_threads, partition);

        // Phase 1: mark interior quads separating hexes of different labels.
        TopologicalRegularGrid3D::DCellsIterator quads(m_topological_grid, 2,
            partition[thread_num], partition[thread_num + 1]);
        for (quads.begin(); quads.valid(); quads.advance()) {
          TopologicalRegularGrid3D::CofacetsIterator hexs(m_topological_grid);
          INDEX_TYPE quad = quads.value();
          if (m_topological_grid->boundaryValue(quad) != 0) continue;
          hexs.begin(quad);
          INDEX_TYPE hex1 = hexs.value();
          hexs.advance();
          INDEX_TYPE hex2 = hexs.value();

          INDEX_TYPE v1gid = maxv_labeling->Cell2HighestVertex(hex1);
          INDEX_TYPE v2gid = maxv_labeling->Cell2HighestVertex(hex2);
          if (v1gid == v2gid) continue; // they are part of same lower star so no worries here

          INDEX_TYPE vertex_number1 = m_topological_grid->VertexNumberFromCellID(v1gid);
          INDEX_TYPE vertex_number2 = m_topological_grid->VertexNumberFromCellID(v2gid);

          auto lab1 = input_labels->GetLabel(vertex_number1);
          auto lab2 = input_labels->GetLabel(vertex_number2);
          if (lab1 == -1 || lab2 == -1) continue;
          if (lab1 != lab2) {
            (*m_output_labels)[quad] += 2;
          }
        }

#pragma omp barrier

        // Phase 2: interior edges touching a marked quad (value > 1).
        TopologicalRegularGrid3D::DCellsIterator edges(m_topological_grid, 1,
            partition[thread_num], partition[thread_num + 1]);
        for (edges.begin(); edges.valid(); edges.advance()) {
          INDEX_TYPE edge = edges.value();
          if (m_topological_grid->boundaryValue(edge) != 0) continue;
          TopologicalRegularGrid3D::CofacetsIterator quadedges(m_topological_grid);
          for (quadedges.begin(edge); quadedges.valid(); quadedges.advance()) {
            if ((*m_output_labels)[quadedges.value()] > 1) {
              (*m_output_labels)[edge] += 2;
              break;
            }
          }
        }

#pragma omp barrier

        // Phase 3: interior vertices touching a marked cofacet.
        TopologicalRegularGrid3D::DCellsIterator vertices(m_topological_grid, 0,
            partition[thread_num], partition[thread_num + 1]);
        for (vertices.begin(); vertices.valid(); vertices.advance()) {
          INDEX_TYPE vertex = vertices.value();
          if (m_topological_grid->boundaryValue(vertex) != 0) continue;
          TopologicalRegularGrid3D::CofacetsIterator voxelquads(m_topological_grid);
          for (voxelquads.begin(vertex); voxelquads.valid(); voxelquads.advance()) {
            if ((*m_output_labels)[voxelquads.value()] > 1) {
              (*m_output_labels)[vertex] += 2;
              break;
            }
          }
        }
      }
      return;// m_output_labels;
    }

    // Variant of ComputeMINBoundary that encodes the larger of the two vertex
    // labels (mod 126, +1) into the edge mark instead of a plain 1.
    // NOTE(review): references this->m_input_labels, which is NOT declared in
    // this class -- this template method will only compile if it is never
    // instantiated, or if a (not visible here) declaration provides the
    // member.  Looks like dead/experimental code; verify before calling.
    DenseLabeling<char>* ComputeBoundaryHACK() {
#pragma omp parallel
      {
        int num_threads = omp_get_num_threads();
        int thread_num = omp_get_thread_num();
        std::vector<INDEX_TYPE> partition;
        ArrayIndexPartitioner::EvenChunkSplit(m_topological_grid->numCells(), num_threads, partition);

        TopologicalRegularGrid3D::DCellsIterator edges(m_topological_grid, 1,
            partition[thread_num], partition[thread_num + 1]);
        for (edges.begin(); edges.valid(); edges.advance()) {
          TopologicalRegularGrid3D::FacetsIterator vertices(m_topological_grid);
          INDEX_TYPE edge = edges.value();
          vertices.begin(edge);
          INDEX_TYPE vertex1 = vertices.value();
          vertices.advance();
          INDEX_TYPE vertex2 = vertices.value();

          INDEX_TYPE vertex_number1 = m_topological_grid->VertexNumberFromCellID(vertex1);
          INDEX_TYPE vertex_number2 = m_topological_grid->VertexNumberFromCellID(vertex2);

          if ((*(this->m_input_labels))[vertex_number1] != (*(this->m_input_labels))[vertex_number2]) {
            // Encode max(label1, label2) into 1..126 (char-safe range).
            INDEX_TYPE lv = ((*(this->m_input_labels))[vertex_number1] > (*(this->m_input_labels))[vertex_number2] ?
              (*(this->m_input_labels))[vertex_number1] : (*(this->m_input_labels))[vertex_number2]);
            (*(this->m_output_labels))[edge] = (lv % 126 + 1);
          }
        }

#pragma omp barrier

        // Propagation phases mirror ComputeMINBoundary (test against == 1).
        TopologicalRegularGrid3D::DCellsIterator quads(m_topological_grid, 2,
            partition[thread_num], partition[thread_num + 1]);
        for (quads.begin(); quads.valid(); quads.advance()) {
          INDEX_TYPE quad = quads.value();
          TopologicalRegularGrid3D::FacetsIterator quadedges(m_topological_grid);
          for (quadedges.begin(quad); quadedges.valid(); quadedges.advance()) {
            if ((*m_output_labels)[quadedges.value()] == 1) {
              (*m_output_labels)[quad] = 1;
              break;
            }
          }
        }

#pragma omp barrier

        TopologicalRegularGrid3D::DCellsIterator voxels(m_topological_grid, 3,
            partition[thread_num], partition[thread_num + 1]);
        for (voxels.begin(); voxels.valid(); voxels.advance()) {
          INDEX_TYPE voxel = voxels.value();
          TopologicalRegularGrid3D::FacetsIterator voxelquads(m_topological_grid);
          for (voxelquads.begin(voxel); voxelquads.valid(); voxelquads.advance()) {
            if ((*m_output_labels)[voxelquads.value()] == 1) {
              (*m_output_labels)[voxel] = 1;
              break;
            }
          }
        }
      }
      return m_output_labels;
    }
  };

}

#endif
thread_info.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef CORE_UTIL_THREAD_INFO_H_
#define CORE_UTIL_THREAD_INFO_H_

#include <omp.h>
#include <sched.h>

#include <vector>

#include "core/util/log.h"
#include "core/util/numa.h"

namespace bdm {

/// \brief This class stores information about each thread. (e.g. to which NUMA
/// node it belongs to.)
/// NB: Threads **must** be bound to CPUs using `OMP_PROC_BIND=true`.
class ThreadInfo {
 public:
  /// Meyers singleton accessor; construction runs Renew() once.
  static ThreadInfo* GetInstance() {
    static ThreadInfo kInstance;
    return &kInstance;
  }

  /// Returns the number of NUMA nodes on this machine
  int GetNumaNodes() const { return numa_nodes_; }

  /// Returns the numa node the given openmp thread is bound to.
  int GetNumaNode(int omp_thread_id) const {
    return thread_numa_mapping_[omp_thread_id];
  }

  /// Returns the number of threads in a given NUMA node.
  int GetThreadsInNumaNode(int numa_node) const {
    return threads_in_numa_[numa_node];
  }

  /// Return the numa thread id of an openmp thread.
  int GetNumaThreadId(int omp_thread_id) const {
    return numa_thread_id_[omp_thread_id];
  }

  /// Return the maximum number of threads.
  int GetMaxThreads() const { return max_threads_; }

  /// Renews the metadata.\n
  /// Whenever a thread is scheduled on a different cpu, e.g. using
  /// `numa_run_on_node`, `Renew()` must be called to update the thread
  /// metadata.
  /// NOTE(review): not synchronized -- concurrent calls (or calls racing the
  /// getters above) would race on the vectors; presumably only called from a
  /// single controlling thread.  TODO confirm.
  void Renew() {
    max_threads_ = omp_get_max_threads();
    numa_nodes_ = numa_num_configured_nodes();

    thread_numa_mapping_.clear();
    numa_thread_id_.clear();
    threads_in_numa_.clear();

    thread_numa_mapping_.resize(max_threads_, 0);
    numa_thread_id_.resize(max_threads_, 0);
    threads_in_numa_.resize(numa_nodes_, 0);

    // (openmp thread id -> numa node)
    // Each OpenMP thread records the NUMA node of the CPU it is currently
    // pinned to (hence the OMP_PROC_BIND requirement).
#pragma omp parallel
    {
      int tid = omp_get_thread_num();
      thread_numa_mapping_[tid] = numa_node_of_cpu(sched_getcpu());
    }

    // (numa -> number of associated threads), and
    // (omp_thread_id -> thread id in numa)
    // Sequential scan per node assigns each thread a dense 0..k-1 id within
    // its NUMA node, ordered by OpenMP thread id.
    for (uint16_t n = 0; n < numa_nodes_; n++) {
      uint64_t cnt = 0;
      for (uint64_t t = 0; t < max_threads_; t++) {
        int numa = thread_numa_mapping_[t];
        if (n == numa) {
          numa_thread_id_[t] = cnt;
          cnt++;
        }
      }
      threads_in_numa_[n] = cnt;
    }
  }

  friend std::ostream& operator<<(std::ostream& str, const ThreadInfo& ti) {
    str << "max_threads            " << ti.max_threads_
        << "\nnum_numa nodes       " << ti.numa_nodes_;

    str << "\nthread to numa mapping ";
    for (auto& el : ti.thread_numa_mapping_) {
      str << " " << el;
    }

    str << "\nthread id in numa node ";
    for (auto& el : ti.numa_thread_id_) {
      str << " " << el;
    }

    str << "\nnum threads per numa   ";
    for (auto& el : ti.threads_in_numa_) {
      str << " " << el;
    }
    str << "\n";
    return str;
  }

 private:
  /// Maximum number of threads for this simulation.
  uint64_t max_threads_;

  /// Number of NUMA nodes on this machine.
  uint16_t numa_nodes_;

  /// Contains the mapping thread id -> numa node \n
  /// vector position = omp_thread_id \n
  /// vector value = numa node
  std::vector<int> thread_numa_mapping_;

  /// Contains the mapping omp_thread_id -> numa thread id \n
  /// each thread in a numa domain has a unique id in the range 0 to number \n
  /// of threads in this numa domain
  std::vector<int> numa_thread_id_;

  /// Contains the mapping numa node -> total number of threads in this numa
  /// node \n
  /// vector position: numa node \n
  /// vector value number of threads
  std::vector<int> threads_in_numa_;

  // Private: singleton, constructed only via GetInstance().
  // NOTE(review): `1` here is omp_proc_bind_true; bindings such as
  // `close`/`spread` (which also pin threads) are rejected by this check --
  // confirm that is intended.
  ThreadInfo() {
    if (omp_get_proc_bind() != 1) {
      Log::Fatal("ThreadInfo",
                 "The environmental variable OMP_PROC_BIND must be set to "
                 "true. On Linux run 'export OMP_PROC_BIND=true' prior to "
                 "running BioDynaMo");
    }
    Renew();
  }
};

}  // namespace bdm

#endif  // CORE_UTIL_THREAD_INFO_H_
GB_binop__isge_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isge_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__isge_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__isge_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__isge_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isge_int64)
// A*D function (colscale):         GB (_AxD__isge_int64)
// D*A function (rowscale):         GB (_DxB__isge_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__isge_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__isge_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isge_int64)
// C=scalar+B                       GB (_bind1st__isge_int64)
// C=scalar+B'                      GB (_bind1st_tran__isge_int64)
// C=A+scalar                       GB (_bind2nd__isge_int64)
// C=A'+scalar                      GB (_bind2nd_tran__isge_int64)

// C type:   int64_t
// A type:   int64_t
// A pattern? 0
// B type:   int64_t
// B pattern? 0

// BinaryOp: cij = (aij >= bij)

// The macros below configure the shared GB_*_template.c files included by the
// functions in this file; they encode the ISGE operator on int64_t operands.

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_INT64 || GxB_NO_ISGE_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGE is none of these, so this accumulating dense ewise3 kernel is
// compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x >= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isge_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij >= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
MobileNet_CPU_imagenet.c
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <omp.h> #include <string.h> #include "sparse.h" double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 224 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // jocare // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS int im_sizes[27] = { 224, 224, 112, 112, 112, 112, 56, 56, 56, 56, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 14, 14, 14, 14, 14 }; int strides[26] = { 1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1 }; int mem_block_shape[3] = { 1024, 224, 224 }; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; float ***wd; float 
**bd; float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2 }; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions int cshape[27][4] = { { 32, 3, CONV_SIZE, CONV_SIZE }, { 32, 1, CONV_SIZE, CONV_SIZE }, { 43, 32, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; #else // PLAIN int cshape[27][4] = { { 32, 3, CONV_SIZE, CONV_SIZE }, { 32, 1, CONV_SIZE, CONV_SIZE }, { 64, 32, 1, 1 }, { 64, 1, CONV_SIZE, CONV_SIZE }, { 128, 64, 1, 1 }, { 128, 1, CONV_SIZE, CONV_SIZE }, { 128, 128, 1, 1 }, { 128, 1, CONV_SIZE, CONV_SIZE }, { 256, 128, 1, 1 }, { 256, 1, CONV_SIZE, CONV_SIZE }, { 256, 256, 1, 1 }, { 256, 1, CONV_SIZE, CONV_SIZE }, { 512, 256, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 1024, 512, 1, 1 }, { 1024, 1, 
CONV_SIZE, CONV_SIZE }, { 1024, 1024, 1, 1 } }; int dshape[1][2]= { { 1024, 1000} }; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 32; block1 = malloc(max_channels * sizeof(float**)); block2 = malloc(max_channels * sizeof(float**)); // allocate block memory for(i = 0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float*)); block2[i] = malloc(max_im_size * sizeof(float*)); for(j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t****) malloc(CONV_LEVELS * sizeof(csr_t***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t***) malloc(cshape[l][0] * sizeof(csr_t**)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t**) malloc(cshape[l][1] * sizeof(csr_t*)); } } // wc memory allocated below will be freed in read_weights if SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float****)); // allocate kernel memory for(l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float***)); for(i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float**)); for(j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = 
malloc(cshape[l][2] * sizeof(float*)); for(k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3]* sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float*)); batchnorm_biases = malloc(27 * sizeof(float*)); batchnorm_means = malloc(27 * sizeof(float*)); batchnorm_vars = malloc(27 * sizeof(float*)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float**)); bd = malloc(1 * sizeof(float*)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float*)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 
0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; iin = fopen(in_file, "r"); if (iin == NULL) { printf("File %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); //printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); bd[z][i] = dval; 
} } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { //printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]); csr_t* a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); //print_csr(a); wc_sparse[l][i][j] = a; //printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float*****) malloc(1 * sizeof(float****)); wc_first_conv[l] = (float****) malloc(cshape[l][0] * sizeof(float***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float***) malloc(cshape[l][1] * sizeof(float**)); for (j = 0; j < cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float**) malloc(cshape[l][2] * sizeof(float*)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float*) malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("File %s 
absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i=i+stride) { for (j = 0; j < size; j=j+stride) { sum = zeropad[i ][j ] * kernel[0][0] + zeropad[i ][j + 1] * kernel[0][1] + zeropad[i ][j + 2] * kernel[0][2] + zeropad[i + 1][j ] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j ] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } void convolution_3_x_3_sparse(float **matrix, csr_t* kernel, float **out, int size, int stride) { int i, j; // float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } // float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2]; // for (i = 0; i < (size + 2); ++i) // zeropad[i] = (float*) malloc ((size + 2) * sizeof(float)); // //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // // padding with zeros // for (i = 0; i < size + 2; ++i) { // zeropad[i][0] = 0; // zeropad[i][size + 1] = 0; // } // for (i = 1; i < size + 1; ++i) { // zeropad[0][i] = 0; // zeropad[size + 1][i] = 0; // } // // copying input value // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // zeropad[i + 1][j + 1] = matrix[i][j]; 
// } // } // // convolution // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // out[i][j] += s_csr_conv(kernel, zeropad, i, j); // } // } // for (i = 0; i < (size + 2); ++i) // free(zeropad[i]); // free(zeropad); int k,l; float sum; // convolution for (i = 0; i < size; i+=stride) { for (j = 0; j < size; j+=stride) { //out[i][j] += s_csr_conv(kernel, zeropad, i, j); sum = 0.f; for (k = 0; k < kernel->nrows; ++k) { // for every nonzero element in this row for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) { // Scale the corresponding row of B with the nonzero value of A float value = kernel->values[l]; int col = kernel->colind[l]; sum += value * zeropad[i + k][j + col]; } } out[i][j] += sum; } } } /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { int i, j, k, l; float sum; #pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for(j = 0; j < image_size; j++) { for(k = 0; k < image_size; k++) { sum = 0.; for(l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0][0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } } /****************************************************************************************************************************/ void pointwise_convolution_sparse(float **matrix, csr_t* kernel, float **out, int size) { int i, j; // float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } // float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2]; // for (i = 0; i < (size + 2); ++i) // 
zeropad[i] = (float*) malloc ((size + 2) * sizeof(float)); // //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // // padding with zeros // for (i = 0; i < size + 2; ++i) { // zeropad[i][0] = 0; // zeropad[i][size + 1] = 0; // } // for (i = 1; i < size + 1; ++i) { // zeropad[0][i] = 0; // zeropad[size + 1][i] = 0; // } // // copying input value // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // zeropad[i + 1][j + 1] = matrix[i][j]; // } // } // // convolution // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // out[i][j] += s_csr_conv(kernel, zeropad, i, j); // } // } // for (i = 0; i < (size + 2); ++i) // free(zeropad[i]); // free(zeropad); int k,l; float sum; // convolution for (i = 0; i < size; ++i) { for (j = 0; j < size; ++j) { //out[i][j] += s_csr_conv(kernel, zeropad, i, j); sum = 0.f; for (k = 0; k < kernel->nrows; ++k) { // for every nonzero element in this row for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) { // Scale the corresponding row of B with the nonzero value of A float value = kernel->values[l]; int col = kernel->colind[l]; sum += value * zeropad[i + k][j + col]; } } out[i][j] += sum; } } } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for(channel = 0; channel < num_channels; channel++) { float invstd = 1. 
/ sqrt(var[channel] + 0.000001); for(i = 0; i < image_size; i++) { for(j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd ) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); //out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level+1][0]; //printf("level %i: %i ==> %i\n", level, input_channels, output_channels); for(i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level+1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back into the first memory block #if SPARSE_CONVOLUTIONS for(i = 0; i < output_channels; i++) { for(j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level] ); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level+1]); reset_mem_block(block2); } 
/****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ float avg_of(float **in, int start_x, int start_y, int kernel_size) { float sum = 0.; for(int i = 0; i < kernel_size; ++i) { for(int j = 0; j < kernel_size; ++j) { sum += in[start_x+i][start_y+j]; } } return sum / (kernel_size * kernel_size); } /****************************************************************************************************************************/ void avg_pool(float ***in, float ***out, int channels, int k_size, int image_size) { for(int c = 0; c < channels; ++c) { out[c][0][0] = avg_of(in[c],0,0, 7); out[c][0][1] = avg_of(in[c],0,6, 7); out[c][1][0] = avg_of(in[c],6,0, 7); out[c][1][1] = avg_of(in[c],6,6, 7); } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { int i, j; #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } } /****************************************************************************************************************************/ void 
write_out_block(int layer, float ***block) { int layer_name = layer;// * 2 - 1; char filename[16]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL){ printf("Error opening file!\n"); exit(1); } for(int i = 0; i < 32; i++) { for(int j = 0; j < mem_block_shape[1]; j++) { for(int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) { int layer_name = layer;// * 2 - 1; char filename[7]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL){ printf("Error opening file!\n"); exit(1); } for(int o = 0; o < cshape[layer][0]; o++) { for(int i = 0; i < cshape[layer][1]; i++) { for(int k_h = 0; k_h < cshape[layer][2]; k_h++) { for(int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[7]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL){ printf("Error opening file!\n"); exit(1); } for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c=0; if (only_convolution == 1) { //for (i = 0; i < 512*7*7; 
i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum=-1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if(mem_block1_dense[i]>maximum){ maximum=mem_block2_dense[i]; c=i+1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n",c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int i, j; // normal convolution for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 32, 32); reset_mem_block(block2); // depthwise convolutions for(level = 1; level < (CONV_LEVELS - 1); level=level+2) { depthwise_convolution(block1, block2, wc[level], wc[level+1], (level)); } // average pool 7 with 1024 channels of 14x14 images avg_pool(block1, block2, 1024, 7, 14); // flatten flatten(block2, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? 
return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf("Usage: <program.exe> <weights file> <images list file> <output file> <only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { fgets(buf, 1024, file_list); if (strlen(buf) == 0) { break; } read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(results, only_convolution); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); output_predictions(results, only_convolution, 1024, 1); } //free_memory(); fclose(file_list); return 0; }
stream.c
// Copyright 2009-2020 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2020, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.

#include <stdio.h>
#include <stdlib.h>

/*
 * STREAM-style triad kernel (c = 2a + 1.5b) used to drive the Ariel/CramSim
 * simulation; the OpenMP loop is the measured access pattern.
 */
int main(int argc, char* argv[]) {
	const int LENGTH = 2000;

	printf("\n\n\nHello CramSim!!!!\n");
	printf("Run a stream application with ariel and cramsim\n");
	printf("------------------------------------------------------\n");
	printf("Allocating arrays of size %d elements.\n", LENGTH);

	double* a = (double*) malloc(sizeof(double) * LENGTH);
	double* b = (double*) malloc(sizeof(double) * LENGTH);
	double* c = (double*) malloc(sizeof(double) * LENGTH);
	// the original dereferenced these without checking for allocation failure
	if (a == NULL || b == NULL || c == NULL) {
		fprintf(stderr, "Allocation failed.\n");
		free(a);
		free(b);
		free(c);
		return 1;
	}
	printf("Done allocating arrays.\n");

	int i;
	for (i = 0; i < LENGTH; ++i) {
		a[i] = i;
		b[i] = LENGTH - i;
		c[i] = 0;
	}

	printf("Perfoming the fast_c compute loop...\n");

	// triad: each iteration is independent; the loop index is implicitly
	// private under OpenMP
	#pragma omp parallel for
	for (i = 0; i < LENGTH; ++i) {
		c[i] = 2.0 * a[i] + 1.5 * b[i];
	}

	// reduce so the compute loop cannot be optimized away
	double sum = 0;
	for (i = 0; i < LENGTH; ++i) {
		sum += c[i];
	}

	printf("Sum of arrays is: %f\n", sum);
	printf("Freeing arrays...\n");

	free(a);
	free(b);
	free(c);

	printf("Done.\n");
	return 0;
}
rules.h
/* * Copyright (c) 2018 * Markus Goetz * * This software may be modified and distributed under the terms of MIT-style license. * * Description: Cluster remapping rules * * Maintainer: m.goetz * * Email: markus.goetz@kit.edu */ #ifndef RULES_H #define RULES_H #include <omp.h> #include <unordered_map> #include "constants.h" class Rules { std::unordered_map<Cluster, Cluster> m_rules; public: Rules() { m_rules[NOISE] = 0; } inline const std::unordered_map<Cluster, Cluster>::const_iterator begin() const { return m_rules.begin(); } inline const std::unordered_map<Cluster, Cluster>::const_iterator end() const { return m_rules.end(); } inline void remove(const Cluster index) { m_rules.erase(m_rules.find(index)); } Cluster rule(const Cluster cluster) const { const auto& pair = m_rules.find(cluster); if (pair != m_rules.end()) { return pair->second; } return NOT_VISITED; } inline size_t size() const { return m_rules.size(); } bool update(const Cluster first, const Cluster second) { if (first <= second or first >= NOISE) { return false; } const auto& pair = m_rules.find(first); if (pair != m_rules.end()) { if (pair->second > second) { update(pair->second, second); m_rules[first] = second; } else { update(second, pair->second ); } } else { m_rules[first] = second; } return true; } }; void merge(Rules& omp_out, Rules& omp_in) { for (const auto& rule : omp_in) { omp_out.update(rule.first, rule.second); } } #pragma omp declare reduction(merge: Rules: merge(omp_out, omp_in)) initializer(omp_priv(omp_orig)) #endif // RULES_H
ej3.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>

#define TAM 4000

/* Fill M (length TAM) with the constant 5.0f, in parallel. */
void rellenarArray(float *M){
    #pragma omp parallel for schedule(guided) num_threads(4)
    for(int i=0;i<TAM;++i){
        *(M+i)=5.0f;
    }
}

/* The accumulation could also be done with an omp critical section around
 * the shared accumulator; a reduction(+:total) is used here instead. */

/* Dot product of a and b (both length TAM) computed with `numthreads`
 * OpenMP threads and guided scheduling. */
static float productoEscalar(const float *a, const float *b, int numthreads){
    float total = 0.0f;
    #pragma omp parallel for schedule(guided) num_threads(numthreads) reduction(+:total)
    for(int i=0;i<TAM;++i)
        total += *(a+i)*(*(b+i));
    return total;
}

/* Time one dot-product run with `numthreads` threads, print the timing
 * banner, and return the result.
 * Fix: the original repeated this start/compute/print block four times
 * verbatim, once per thread count. */
static float medirProducto(const float *a, const float *b, int numthreads){
    double start = omp_get_wtime();
    float total = productoEscalar(a, b, numthreads);
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa con %i hilos, %lfs\n-------------------------------------------\n", numthreads, omp_get_wtime()-start);
    return total;
}

int main()
{
    float total = 0.0f;

    float *a=(float *)malloc(sizeof(float)*TAM);
    float *b=(float *)malloc(sizeof(float)*TAM);
    /* Fix: the original never checked the allocations. */
    if (a == NULL || b == NULL) {
        free(a);
        free(b);
        return 1;
    }
    rellenarArray(a);
    rellenarArray(b);

    printf("\nTamanyo de los vectores: %i\n", TAM);

    /* Benchmark the same computation with increasing thread counts. */
    const int hilos[] = {2, 4, 6, 8};
    for (size_t idx = 0; idx < sizeof(hilos)/sizeof(hilos[0]); ++idx) {
        total = medirProducto(a, b, hilos[idx]);
    }

    /* As in the original, the reported value is that of the last run. */
    printf("\nResultado final, valor: %f\n", total);

    /* Fix: a and b were leaked in the original. */
    free(a);
    free(b);
    return 0;
}
bias_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: chh@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* Reference fp32 bias kernel: out[c][h*w + i] = in[c][h*w + i] + bias[c].
 * Channel/height/width are read from dims[1..3]; dims[0] (presumably the
 * batch dimension) is not consulted, so this appears to assume a single
 * batch -- TODO confirm against callers.  Channels are processed in
 * parallel across num_thread OpenMP threads.  Always returns 0. */
int ref_bias_fp32(struct tensor* input_tensor, struct tensor* output_tensor, struct tensor* bias_tensor, int num_thread)
{
    int channels = input_tensor->dims[1];
    int h = input_tensor->dims[2];
    int w = input_tensor->dims[3];
    int size = h * w;

    float* in_data = input_tensor->data;
    float* bias = bias_tensor->data;
    float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channels; c++)
    {
        /* Each channel is an independent contiguous plane of h*w floats. */
        float* out_ptr = out_data + c * size;
        float* in_ptr = in_data + c * size;
        for (int i = 0; i < size; i++)
        {
            out_ptr[i] = in_ptr[i] + bias[c];
        }
    }

    return 0;
}

/* No per-node state to set up for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to tear down. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing to pre-compute before the first run. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Fetch the input, bias and output tensors from the IR graph and dispatch
 * to the fp32 reference kernel.  Only TENGINE_DT_FP32 is handled; any
 * other data type logs an error and the function returns -1. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* bias_tensor;
    struct tensor* output_tensor;
    int layout = ir_graph->graph_layout; /* fetched but not used here */

    /* input_tensors[0] is the data tensor, input_tensors[1] the bias. */
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_bias_fp32(input_tensor, output_tensor, bias_tensor, exec_graph->num_thread);
    else
        TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

/* Advertise OPS_SCORE_CANDO -- NOTE(review): presumably a low, "can do it"
 * priority so optimized device kernels outrank this reference version;
 * confirm against the scoring enum. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

/* Dispatch table for the bias reference implementation. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

/* Register/unregister this reference implementation for OP_BIAS. */
int register_bias_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_BIAS, &hcl_node_ops);
}

int unregister_bias_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_BIAS, &hcl_node_ops);
}
GB_unop__identity_fc32_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE: any change here must be made in the Generator templates instead,
// or it will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_uint64)
// op(A') function:  GB (_unop_tran__identity_fc32_uint64)

// C type:   GxB_FC32_t
// A type:   uint64_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: uint64 -> single-precision complex with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out flags from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_uint64)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: apply the operator to all anz entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are not present and are skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__abs_int64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE: any change here must be made in the generator templates instead,
// or it will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int64_uint64
// op(A') function:  GB_tran__abs_int64_uint64

// C type:   int64_t
// A type:   uint64_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (integer absolute value, as defined by GB_IABS in GB.h)
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out flags from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int64_uint64
(
    int64_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_uint16_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE: any change here must be made in the generator templates instead,
// or it will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint16_fp64
// op(A') function:  GB_unop_tran__identity_uint16_fp64

// C type:   uint16_t
// A type:   double
// cast:     uint16_t cij = GB_cast_to_uint16_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: double -> uint16, via the saturating cast helper in GB.h
#define GB_CAST(z, aij) \
    uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out flags from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint16_fp64
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unaryop__minv_int16_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE: any change here must be made in the generator templates instead,
// or it will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int16_int32
// op(A') function:  GB_tran__minv_int16_int32

// C type:   int16_t
// A type:   int32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 16)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: signed integer multiplicative inverse on 16 bits,
// as defined by GB_IMINV_SIGNED (see GB.h)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out flags from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_int16_int32
(
    int16_t *Cx,        // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands using the GB_* macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_subref_template.c
//------------------------------------------------------------------------------ // GB_subref_template: C = A(I,J), or C = pattern (A(I,J)) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #if defined ( GB_SYMBOLIC ) // symbolic method must tolerate zombies #define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen) #else // numeric method will not see any zombies #define GB_Ai(p) GBI (Ai, p, avlen) #endif // to iterate across all entries in a bucket: #define GB_for_each_index_in_bucket(inew,i) \ for (int64_t inew = Mark[i]-1 ; inew >= 0 ; inew = Inext [inew]) // copy values from A(:,kA) to C(:,kC): Cx [pC:pC+len-1] = ... (pA:pA+len-1). #if defined ( GB_SYMBOLIC ) // symbolic copy: Cx is int64_t; Ax is ignored #define GB_COPY_RANGE(pC,pA,len) \ for (int64_t k = 0 ; k < (len) ; k++) \ { \ Cx [(pC) + k] = (pA) + k ; \ } #else // numeric copy: Cx and Ax are both (GB_void *), and point to the same type #define GB_COPY_RANGE(pC,pA,len) \ memcpy (Cx + (pC)*asize, Ax + (pA)*asize, (len) * asize) ; #endif // copy a single value from A(:,kA) to C(:,kC): Cx [pC] = ... 
(pA]) #if defined ( GB_SYMBOLIC ) // symbolic copy: Cx is int64_t; Ax is ignored #define GB_COPY_ENTRY(pC,pA) \ Cx [pC] = (pA) ; #else // numeric copy: Cx and Ax are both (GB_void *), and point to the same type #define GB_COPY_ENTRY(pC,pA) \ /* Cx [pC] = Ax [pA] */ \ memcpy (Cx + (pC)*asize, Ax + (pA)*asize, asize) ; #endif // the type of Cx #if defined ( GB_SYMBOLIC ) // C is an int64_t array; the type of A is ignored #define GB_CTYPE int64_t #define GB_CSIZE1 1 #define GB_CSIZE2 (sizeof (int64_t)) #else // C and A have the same type #define GB_CTYPE GB_void #define GB_CSIZE1 asize #define GB_CSIZE2 asize #endif { //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ai = A->i ; const int64_t avlen = A->vlen ; #if defined ( GB_SYMBOLIC ) const int64_t nzombies = A->nzombies ; #endif #if defined ( GB_PHASE_2_OF_2 ) && defined ( GB_NUMERIC ) ASSERT (C->type = A->type) ; const GB_void *GB_RESTRICT Ax = (GB_void *) A->x ; const int64_t asize = A->type->size ; #endif //-------------------------------------------------------------------------- // get C //-------------------------------------------------------------------------- #if defined ( GB_PHASE_2_OF_2 ) int64_t *GB_RESTRICT Ci = C->i ; GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // get I //-------------------------------------------------------------------------- // these values are ignored if Ikind == GB_LIST int64_t ibegin = Icolon [GxB_BEGIN] ; int64_t iinc = Icolon [GxB_INC ] ; int64_t inc = (iinc < 0) ? 
(-iinc) : iinc ; #ifdef GB_DEBUG int64_t iend = Icolon [GxB_END ] ; #endif //-------------------------------------------------------------------------- // phase1: count entries in each C(:,kC); phase2: compute C //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; bool fine_task = (klast < 0) ; if (fine_task) { // a fine task operates on a slice of a single vector klast = kfirst ; } // a coarse task accesses all of I for all its vectors int64_t pI = 0 ; int64_t pI_end = nI ; int64_t ilen = nI ; ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ; //---------------------------------------------------------------------- // compute all vectors C(:,kfirst:klast) for this task //---------------------------------------------------------------------- for (int64_t kC = kfirst ; kC <= klast ; kC++) { //------------------------------------------------------------------ // get C(:,kC) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) // phase1 simply counts the # of entries in C(*,kC). int64_t clen = 0 ; #else // This task computes all or part of C(:,kC), which are the entries // in Ci,Cx [pC:pC_end-1]. int64_t pC, pC_end ; if (fine_task) { // A fine task computes a slice of C(:,kC) pC = TaskList [taskid ].pC ; pC_end = TaskList [taskid+1].pC ; ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ; } else { // The vectors of C are never sliced for a coarse task, so this // task computes all of C(:,kC). 
pC = Cp [kC] ; pC_end = Cp [kC+1] ; } int64_t clen = pC_end - pC ; if (clen == 0) continue ; #endif //------------------------------------------------------------------ // get A(:,kA) //------------------------------------------------------------------ int64_t pA, pA_end ; if (fine_task) { // a fine task computes a slice of a single vector C(:,kC). // The task accesses Ai,Ax [pA:pA_end-1], which holds either // the entire vector A(imin:imax,kA) for method 6, the entire // dense A(:,kA) for methods 1 and 2, or a slice of the // A(imin:max,kA) vector for all other methods. pA = TaskList [taskid].pA ; pA_end = TaskList [taskid].pA_end ; } else { // a coarse task computes the entire vector C(:,kC). The task // accesses all of A(imin:imax,kA), for most methods, or all of // A(:,kA) for methods 1 and 2. The vector A(*,kA) appears in // Ai,Ax [pA:pA_end-1]. pA = Ap_start [kC] ; pA_end = Ap_end [kC] ; } int64_t alen = pA_end - pA ; if (alen == 0) continue ; //------------------------------------------------------------------ // get I //------------------------------------------------------------------ if (fine_task) { // A fine task accesses I [pI:pI_end-1]. For methods 2 and 6, // pI:pI_end is a subset of the entire 0:nI-1 list. For all // other methods, pI = 0 and pI_end = nI, and the task can // access all of I. pI = TaskList [taskid].pB ; pI_end = TaskList [taskid].pB_end ; ilen = pI_end - pI ; } //------------------------------------------------------------------ // determine the method to use //------------------------------------------------------------------ int method ; if (fine_task) { // The method that the fine task uses for its slice of A(*,kA) // and C(*,kC) has already been determined by GB_subref_slice. 
method = (int) (-TaskList [taskid].klast) ; } else { // determine the method based on A(*,kA) and I method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI, (Mark != NULL), need_qsort, iinc, nduplicates) ; } //------------------------------------------------------------------ // extract C (:,kC) = A (I,kA): consider all cases //------------------------------------------------------------------ switch (method) { //-------------------------------------------------------------- case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense //-------------------------------------------------------------- // A (:,kA) has not been sliced ASSERT (Ikind == GB_ALL) ; ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // copy the entire vector and construct indices #if defined ( GB_PHASE_1_OF_2 ) clen = ilen ; #else for (int64_t k = 0 ; k < ilen ; k++) { int64_t inew = k + pI ; ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ; ASSERT (inew == GB_Ai (pA + inew)) ; Ci [pC + k] = inew ; } GB_COPY_RANGE (pC, pA + pI, ilen) ; #endif break ; //-------------------------------------------------------------- case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense //-------------------------------------------------------------- // This method handles any kind of list I, but A(:,kA) // must be dense. A(:,kA) has not been sliced. ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // scan I and get the entry in A(:,kA) via direct lookup #if defined ( GB_PHASE_1_OF_2 ) clen = ilen ; #else for (int64_t k = 0 ; k < ilen ; k++) { // C(inew,kC) = A(i,kA), and it always exists. 
int64_t inew = k + pI ; int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ; ASSERT (i == GB_Ai (pA + i)) ; Ci [pC + k] = inew ; GB_COPY_ENTRY (pC + k, pA + i) ; } #endif break ; //-------------------------------------------------------------- case 3 : // the list I has a single index, ibegin //-------------------------------------------------------------- // binary search in GB_subref_phase0 has already found it. // This can be any Ikind with nI=1: GB_ALL with A->vlen=1, // GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0 // (with length 1), or a GB_LIST with ni=1. // Time: 50x faster than MATLAB ASSERT (!fine_task) ; ASSERT (alen == 1) ; ASSERT (nI == 1) ; ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen = 1 ; #else Ci [pC] = 0 ; GB_COPY_ENTRY (pC, pA) ; #endif break ; //-------------------------------------------------------------- case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA) //-------------------------------------------------------------- // Time: 1x MATLAB but low speedup on the Mac. Why? // Probably memory bound since it is just memcpy's. ASSERT (Ikind == GB_ALL && ibegin == 0) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else #if defined ( GB_SYMBOLIC ) if (nzombies == 0) { memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ; } else { // with zombies for (int64_t k = 0 ; k < alen ; k++) { // symbolic C(:,kC) = A(:,kA) where A has zombies int64_t i = GB_Ai (pA + k) ; ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ; Ci [pC + k] = i ; } } #else memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ; #endif GB_COPY_RANGE (pC, pA, alen) ; #endif break ; //-------------------------------------------------------------- case 5 : // Ikind is GB_RANGE = ibegin:iend //-------------------------------------------------------------- // Time: much faster than MATLAB. Good speedup too. 
ASSERT (Ikind == GB_RANGE) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else for (int64_t k = 0 ; k < alen ; k++) { int64_t i = GB_Ai (pA + k) ; int64_t inew = i - ibegin ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; Ci [pC + k] = inew ; } GB_COPY_RANGE (pC, pA, alen) ; #endif break ; //-------------------------------------------------------------- case 6 : // I is short vs nnz (A (:,kA)), use binary search //-------------------------------------------------------------- // Time: very slow unless I is very short and A(:,kA) is // very long. // This case can handle any kind of I, and A(:,kA) of any // properties. For a fine task, A(:,kA) has not been // sliced; I has been sliced instead. // If the I bucket inverse has not been created, this // method is the only option. Alternatively, if nI = // length (I) is << nnz (A (:,kA)), then scanning I and // doing a binary search of A (:,kA) is faster than doing a // linear-time search of A(:,kA) and a lookup into the I // bucket inverse. // The vector of C is constructed in sorted order, so no // sort is needed. // A(:,kA) has not been sliced. ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // scan I, in order, and search for the entry in A(:,kA) for (int64_t k = 0 ; k < ilen ; k++) { // C(inew,kC) = A (i,kA), if it exists. 
// i = I [inew] ; or from a colon expression int64_t inew = k + pI ; int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ; bool found ; int64_t pleft = pA ; int64_t pright = pA_end - 1 ; #if defined ( GB_SYMBOLIC ) bool is_zombie ; GB_BINARY_SEARCH_ZOMBIE (i, Ai, pleft, pright, found, nzombies, is_zombie) ; #else GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ; #endif if (found) { ASSERT (i == GB_Ai (pleft)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pleft) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- case 7 : // I is ibegin:iinc:iend with iinc > 1 //-------------------------------------------------------------- // Time: 1 thread: C=A(1:2:n,:) is 3x slower than MATLAB // but has good speedup. About as fast as MATLAB with // enough threads. ASSERT (Ikind == GB_STRIDE && iinc > 1) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present; see if it is in ibegin:iinc:iend int64_t i = GB_Ai (pA + k) ; ASSERT (ibegin <= i && i <= iend) ; i = i - ibegin ; if (i % iinc == 0) { // i is in the sequence ibegin:iinc:iend #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else int64_t inew = i / iinc ; ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //---------------------------------------------------------- case 8 : // I = ibegin:(-iinc):iend, with iinc < -1 //---------------------------------------------------------- // Time: 2x slower than MATLAB for iinc = -2 or -8. // Good speedup though. Faster than MATLAB for // large values (iinc = -128). 
ASSERT (Ikind == GB_STRIDE && iinc < -1) ; for (int64_t k = alen - 1 ; k >= 0 ; k--) { // A(i,kA) present; see if it is in ibegin:iinc:iend int64_t i = GB_Ai (pA + k) ; ASSERT (iend <= i && i <= ibegin) ; i = ibegin - i ; if (i % inc == 0) { // i is in the sequence ibegin:iinc:iend #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else int64_t inew = i / inc ; ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //---------------------------------------------------------- case 9 : // I = ibegin:(-1):iend //---------------------------------------------------------- // Time: much faster than MATLAB. Good speedup. ASSERT (Ikind == GB_STRIDE && iinc == -1) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else for (int64_t k = alen - 1 ; k >= 0 ; k--) { // A(i,kA) is present int64_t i = GB_Ai (pA + k) ; int64_t inew = (ibegin - i) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; } #endif break ; //-------------------------------------------------------------- case 10 : // I unsorted, and C needs qsort, duplicates OK //-------------------------------------------------------------- // Time: with one thread: 2x slower than MATLAB, probably // because of the qsort. Good speedup however. This used // if qsort is needed but ndupl == 0. Try a method that // needs qsort, but no duplicates? // Case 10 works well when I has many entries and A(:,kA) // has few entries. C(:,kC) must be sorted after this pass. 
ASSERT (Ikind == GB_LIST) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // traverse bucket i for all indices inew where // i == I [inew] or where i is from a colon expression GB_for_each_index_in_bucket (inew, i) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } // TODO: skip the sort if C is allowed to be jumbled on // output. Flag C as jumbled instead. #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; if (!fine_task) { // a coarse task owns this entire C(:,kC) vector, so // the sort can be done now. The sort for vectors // handled by multiple fine tasks must wait until all // task are completed, below in the post sort. pC = Cp [kC] ; GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1), GB_CSIZE2, clen) ; } #endif break ; //-------------------------------------------------------------- case 11 : // I not contiguous, with duplicates. No qsort needed //-------------------------------------------------------------- // Case 11 works well when I has many entries and A(:,kA) // has few entries. It requires that I be sorted on input, // so that no sort is required for C(:,kC). It is // otherwise identical to Case 10. 
ASSERT (Ikind == GB_LIST) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // traverse bucket i for all indices inew where // i == I [inew] or where i is from a colon expression GB_for_each_index_in_bucket (inew, i) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- case 12 : // I not contiguous, no duplicates. No qsort needed. //-------------------------------------------------------------- // Identical to Case 11, except GB_for_each_index_in_bucket // just needs to iterate 0 or 1 times. Works well when I // has many entries and A(:,kA) has few entries. ASSERT (Ikind == GB_LIST && nduplicates == 0) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // bucket i has at most one index inew such that // i == I [inew] int64_t inew = Mark [i] - 1 ; if (inew >= 0) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- default: ; //-------------------------------------------------------------- } //------------------------------------------------------------------ // final count of nnz (C (:,j)) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (fine_task) { TaskList [taskid].pC = clen ; } else { Cp [kC] = clen ; } #endif } } //-------------------------------------------------------------------------- // phase2: post 
sort for any vectors handled by fine tasks with method 10 //-------------------------------------------------------------------------- // TODO: skip the sort if C is allowed to be jumbled on output. // Flag C as jumbled instead. #if defined ( GB_PHASE_2_OF_2 ) if (post_sort) { int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t kC = TaskList [taskid].kfirst ; bool do_post_sort = (TaskList [taskid].len != 0) ; if (do_post_sort) { // This is the first fine task with method 10 for C(:,kC). The // vector C(:,kC) must be sorted, since method 10 left it with // unsorted indices. int64_t pC = Cp [kC] ; int64_t clen = Cp [kC+1] - pC ; GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1), GB_CSIZE2, clen) ; } } } #endif } #undef GB_Ai #undef GB_for_each_index_in_bucket #undef GB_COPY_RANGE #undef GB_COPY_ENTRY #undef GB_CTYPE #undef GB_CSIZE1 #undef GB_CSIZE2
uts.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /**********************************************************************************************/ /* * Copyright (c) 2007 The Unbalanced Tree Search (UTS) Project Team: * ----------------------------------------------------------------- * * This file is part of the unbalanced tree search benchmark. This * project is licensed under the MIT Open Source license. See the LICENSE * file for copyright and licensing information. * * UTS is a collaborative project between researchers at the University of * Maryland, the University of North Carolina at Chapel Hill, and the Ohio * State University. * * University of Maryland: * Chau-Wen Tseng(1) <tseng at cs.umd.edu> * * University of North Carolina, Chapel Hill: * Jun Huan <huan, * Jinze Liu liu, * Stephen Olivier olivier, * Jan Prins* prins at cs.umd.edu> * * The Ohio State University: * James Dinan <dinan, * Gerald Sabin sabin, * P. Sadayappan* saday at cse.ohio-state.edu> * * Supercomputing Research Center * D. 
Pryor * * (1) - indicates project PI * * UTS Recursive Depth-First Search (DFS) version developed by James Dinan * * Adapted for OpenMP 3.0 Task-based version by Stephen Olivier * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include <sys/time.h> #include "app-desc.h" #include "bots.h" #include "uts.h" /*********************************************************** * Global state * ***********************************************************/ counter_t nLeaves = 0; int maxTreeDepth = 0; /*********************************************************** * tree generation and search parameters * * * * Tree generation strategy is controlled via various * * parameters set from the command line. The parameters * * and their default values are given below. 
* ***********************************************************/ char * uts_trees_str[] = { "Binomial" }; /*********************************************************** * Tree type * Trees are generated using a Galton-Watson process, in * which the branching factor of each node is a random * variable. * * The random variable follow a binomial distribution. ***********************************************************/ tree_t type = BIN; // Default tree type double b_0 = 4.0; // default branching factor at the root int rootId = 0; // default seed for RNG state at root /*********************************************************** * Tree type BIN (BINOMIAL) * The branching factor at the root is specified by b_0. * The branching factor below the root follows an * identical binomial distribution at all nodes. * A node has m children with prob q, or no children with * prob (1-q). The expected branching factor is q * m. * * Default parameter values ***********************************************************/ int nonLeafBF = 4; // m double nonLeafProb = 15.0 / 64.0; // q /*********************************************************** * compute granularity - number of rng evaluations per * tree node ***********************************************************/ int computeGranularity = 1; /*********************************************************** * expected results for execution ***********************************************************/ counter_t exp_tree_size = 0; int exp_tree_depth = 0; counter_t exp_num_leaves = 0; /*********************************************************** * FUNCTIONS * ***********************************************************/ // Interpret 32 bit positive integer as value on [0,1) double rng_toProb(int n) { if (n < 0) { printf("*** toProb: rand n = %d out of range\n",n); } return ((n<0)? 
0.0 : ((double) n)/2147483648.0); } void uts_initRoot(Node * root, int type) { root->height = 0; root->numChildren = -1; // means not yet determined rng_init(root->state.state, rootId); bots_message("Root node of type %d at %p\n",type, root); } int uts_numChildren_bin(Node * parent) { // distribution is identical everywhere below root int v = rng_rand(parent->state.state); double d = rng_toProb(v); return (d < nonLeafProb) ? nonLeafBF : 0; } int uts_numChildren(Node *parent) { int numChildren = 0; /* Determine the number of children */ if (parent->height == 0) numChildren = (int) floor(b_0); else numChildren = uts_numChildren_bin(parent); // limit number of children // only a BIN root can have more than MAXNUMCHILDREN if (parent->height == 0) { int rootBF = (int) ceil(b_0); if (numChildren > rootBF) { bots_debug("*** Number of children of root truncated from %d to %d\n", numChildren, rootBF); numChildren = rootBF; } } else { if (numChildren > MAXNUMCHILDREN) { bots_debug("*** Number of children truncated from %d to %d\n", numChildren, MAXNUMCHILDREN); numChildren = MAXNUMCHILDREN; } } return numChildren; } /*********************************************************** * Recursive depth-first implementation * ***********************************************************/ int getNumRootChildren(Node *root) { int numChildren; numChildren = uts_numChildren(root); root->numChildren = numChildren; return numChildren; } counter_t parallel_uts ( Node *root ) { counter_t num_nodes; bots_message("Computing Unbalance Tree Search algorithm "); #pragma omp parallel #pragma omp single nowait #pragma omp task untied num_nodes = parTreeSearch( 0, root, getNumRootChildren(root) ); bots_message(" completed!"); return num_nodes; } #if defined (IF_CUTOFF) counter_t parTreeSearch(int depth, Node *parent, int numChildren) { Node n[numChildren], *nodePtr; int i, j; counter_t subtreesize = 1, partialCount[numChildren]; // Recurse on the children for (i = 0; i < numChildren; i++) { nodePtr = 
&n[i]; nodePtr->height = parent->height + 1; // The following line is the work (one or more SHA-1 ops) for (j = 0; j < computeGranularity; j++) { rng_spawn(parent->state.state, nodePtr->state.state, i); } nodePtr->numChildren = uts_numChildren(nodePtr); #pragma omp task firstprivate(i, nodePtr) shared(partialCount) untied if (depth < bots_cutoff_value) partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren); } #pragma omp taskwait for (i = 0; i < numChildren; i++) { subtreesize += partialCount[i]; } return subtreesize; } #elif defined (MANUAL_CUTOFF) counter_t parTreeSearch(int depth, Node *parent, int numChildren) { Node n[numChildren], *nodePtr; int i, j; counter_t subtreesize = 1, partialCount[numChildren]; if ( depth < bots_cutoff_value ) { // Recurse on the children for (i = 0; i < numChildren; i++) { nodePtr = &n[i]; nodePtr->height = parent->height + 1; // The following line is the work (one or more SHA-1 ops) for (j = 0; j < computeGranularity; j++) { rng_spawn(parent->state.state, nodePtr->state.state, i); } nodePtr->numChildren = uts_numChildren(nodePtr); #pragma omp task firstprivate(i, nodePtr) shared(partialCount) untied partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren); } } else { // Recurse on the children for (i = 0; i < numChildren; i++) { nodePtr = &n[i]; nodePtr->height = parent->height + 1; // The following line is the work (one or more SHA-1 ops) for (j = 0; j < computeGranularity; j++) { rng_spawn(parent->state.state, nodePtr->state.state, i); } nodePtr->numChildren = uts_numChildren(nodePtr); partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren); } } #pragma omp taskwait for (i = 0; i < numChildren; i++) { subtreesize += partialCount[i]; } return subtreesize; } #else counter_t parTreeSearch(int depth, Node *parent, int numChildren) { //JK //Node n[numChildren], *nodePtr; Node *n, *nodePtr; int i, j; counter_t subtreesize = 1; counter_t *partialCount; //counter_t 
partialCount[numChildren]; n = (Node*)malloc(numChildren * sizeof(Node)); partialCount = (counter_t*)malloc(numChildren * sizeof(counter_t)); // Recurse on the children for (i = 0; i < numChildren; i++) { nodePtr = &n[i]; nodePtr->height = parent->height + 1; // The following line is the work (one or more SHA-1 ops) for (j = 0; j < computeGranularity; j++) { rng_spawn(parent->state.state, nodePtr->state.state, i); } nodePtr->numChildren = uts_numChildren(nodePtr); #pragma omp task firstprivate(i, nodePtr) shared(partialCount) untied partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren); } #pragma omp taskwait for (i = 0; i < numChildren; i++) { subtreesize += partialCount[i]; } free(n); free(partialCount); return subtreesize; } #endif void uts_read_file ( char *filename ) { FILE *fin; if ((fin = fopen(filename, "r")) == NULL) { bots_message( "Could not open input file (%s)\n", filename); exit (-1); } fscanf(fin,"%lf %lf %d %d %d %llu %d %llu", &b_0, &nonLeafProb, &nonLeafBF, &rootId, &computeGranularity, &exp_tree_size, &exp_tree_depth, &exp_num_leaves ); fclose(fin); computeGranularity = max(1,computeGranularity); // Printing input data bots_message("\n"); bots_message("Root branching factor = %f\n", b_0); bots_message("Root seed (0 <= 2^31) = %d\n", rootId); bots_message("Probability of non-leaf node = %f\n", nonLeafProb); bots_message("Number of children for non-leaf node = %d\n", nonLeafBF); bots_message("E(n) = %f\n", (double) ( nonLeafProb * nonLeafBF ) ); bots_message("E(s) = %f\n", (double) ( 1.0 / (1.0 - nonLeafProb * nonLeafBF) ) ); bots_message("Compute granularity = %d\n", computeGranularity); bots_message("Tree type = %d (%s)\n", type, uts_trees_str[type]); bots_message("Random number generator = "); rng_showtype(); } void uts_show_stats( void ) { int nPes = atoi(bots_resources); int chunkSize = 0; bots_message("\n"); bots_message("Tree size = %llu\n", (unsigned long long) bots_number_of_tasks ); bots_message("Maximum tree depth = 
%d\n", maxTreeDepth ); bots_message("Chunk size = %d\n", chunkSize ); bots_message("Number of leaves = %llu (%.2f%%)\n", nLeaves, nLeaves/(float)bots_number_of_tasks*100.0 ); bots_message("Number of PE's = %.4d threads\n", nPes ); bots_message("Wallclock time = %.3f sec\n", bots_time_program ); bots_message("Overall performance = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program) ); bots_message("Performance per PE = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program / nPes) ); } int uts_check_result ( void ) { int answer = BOTS_RESULT_SUCCESSFUL; if ( bots_number_of_tasks != exp_tree_size ) { answer = BOTS_RESULT_UNSUCCESSFUL; bots_message("Tree size value is non valid.\n"); } return answer; }
yescrypt-opt_c.h
/*- * Copyright 2009 Colin Percival * Copyright 2013,2014 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. */ #ifdef __i386__ #warning "This implementation does not use SIMD, and thus it runs a lot slower than the SIMD-enabled implementation. Enable at least SSE2 in the C compiler and use yescrypt-best.c instead unless you're building this SIMD-less implementation on purpose (portability to older CPUs or testing)." #elif defined(__x86_64__) #warning "This implementation does not use SIMD, and thus it runs a lot slower than the SIMD-enabled implementation. 
Use yescrypt-best.c instead unless you're building this SIMD-less implementation on purpose (for testing only)." #endif #include <errno.h> #include <stdint.h> #include <stdlib.h> #include "sha256.h" #include "sysendian.h" #include "yescrypt.h" #include "yescrypt-platform_c.h" static inline void blkcpy(uint64_t * dest, const uint64_t * src, size_t count) { do { *dest++ = *src++; *dest++ = *src++; *dest++ = *src++; *dest++ = *src++; } while (count -= 4); } static inline void blkxor(uint64_t * dest, const uint64_t * src, size_t count) { do { *dest++ ^= *src++; *dest++ ^= *src++; *dest++ ^= *src++; *dest++ ^= *src++; } while (count -= 4); } typedef union { uint32_t w[16]; uint64_t d[8]; } salsa20_blk_t; static inline void salsa20_simd_shuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout) { #define COMBINE(out, in1, in2) \ Bout->d[out] = Bin->w[in1 * 2] | ((uint64_t)Bin->w[in2 * 2 + 1] << 32); COMBINE(0, 0, 2) COMBINE(1, 5, 7) COMBINE(2, 2, 4) COMBINE(3, 7, 1) COMBINE(4, 4, 6) COMBINE(5, 1, 3) COMBINE(6, 6, 0) COMBINE(7, 3, 5) #undef COMBINE } static inline void salsa20_simd_unshuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout) { #define COMBINE(out, in1, in2) \ Bout->w[out * 2] = Bin->d[in1]; \ Bout->w[out * 2 + 1] = Bin->d[in2] >> 32; COMBINE(0, 0, 6) COMBINE(1, 5, 3) COMBINE(2, 2, 0) COMBINE(3, 7, 5) COMBINE(4, 4, 2) COMBINE(5, 1, 7) COMBINE(6, 6, 4) COMBINE(7, 3, 1) #undef COMBINE } /** * salsa20_8(B): * Apply the salsa20/8 core to the provided block. 
*/ static void salsa20_8(uint64_t B[8]) { size_t i; salsa20_blk_t X; #define x X.w salsa20_simd_unshuffle((const salsa20_blk_t *)B, &X); for (i = 0; i < 8; i += 2) { #define R(a,b) (((a) << (b)) | ((a) >> (32 - (b)))) /* Operate on columns */ x[ 4] ^= R(x[ 0]+x[12], 7); x[ 8] ^= R(x[ 4]+x[ 0], 9); x[12] ^= R(x[ 8]+x[ 4],13); x[ 0] ^= R(x[12]+x[ 8],18); x[ 9] ^= R(x[ 5]+x[ 1], 7); x[13] ^= R(x[ 9]+x[ 5], 9); x[ 1] ^= R(x[13]+x[ 9],13); x[ 5] ^= R(x[ 1]+x[13],18); x[14] ^= R(x[10]+x[ 6], 7); x[ 2] ^= R(x[14]+x[10], 9); x[ 6] ^= R(x[ 2]+x[14],13); x[10] ^= R(x[ 6]+x[ 2],18); x[ 3] ^= R(x[15]+x[11], 7); x[ 7] ^= R(x[ 3]+x[15], 9); x[11] ^= R(x[ 7]+x[ 3],13); x[15] ^= R(x[11]+x[ 7],18); /* Operate on rows */ x[ 1] ^= R(x[ 0]+x[ 3], 7); x[ 2] ^= R(x[ 1]+x[ 0], 9); x[ 3] ^= R(x[ 2]+x[ 1],13); x[ 0] ^= R(x[ 3]+x[ 2],18); x[ 6] ^= R(x[ 5]+x[ 4], 7); x[ 7] ^= R(x[ 6]+x[ 5], 9); x[ 4] ^= R(x[ 7]+x[ 6],13); x[ 5] ^= R(x[ 4]+x[ 7],18); x[11] ^= R(x[10]+x[ 9], 7); x[ 8] ^= R(x[11]+x[10], 9); x[ 9] ^= R(x[ 8]+x[11],13); x[10] ^= R(x[ 9]+x[ 8],18); x[12] ^= R(x[15]+x[14], 7); x[13] ^= R(x[12]+x[15], 9); x[14] ^= R(x[13]+x[12],13); x[15] ^= R(x[14]+x[13],18); #undef R } #undef x { salsa20_blk_t Y; salsa20_simd_shuffle(&X, &Y); for (i = 0; i < 16; i += 4) { ((salsa20_blk_t *)B)->w[i] += Y.w[i]; ((salsa20_blk_t *)B)->w[i + 1] += Y.w[i + 1]; ((salsa20_blk_t *)B)->w[i + 2] += Y.w[i + 2]; ((salsa20_blk_t *)B)->w[i + 3] += Y.w[i + 3]; } } } /** * blockmix_salsa8(Bin, Bout, X, r): * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r * bytes in length; the output Bout must also be the same size. The * temporary space X must be 64 bytes. 
*/ static void blockmix_salsa8(const uint64_t * Bin, uint64_t * Bout, uint64_t * X, size_t r) { size_t i; /* 1: X <-- B_{2r - 1} */ blkcpy(X, &Bin[(2 * r - 1) * 8], 8); /* 2: for i = 0 to 2r - 1 do */ for (i = 0; i < 2 * r; i += 2) { /* 3: X <-- H(X \xor B_i) */ blkxor(X, &Bin[i * 8], 8); salsa20_8(X); /* 4: Y_i <-- X */ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */ blkcpy(&Bout[i * 4], X, 8); /* 3: X <-- H(X \xor B_i) */ blkxor(X, &Bin[i * 8 + 8], 8); salsa20_8(X); /* 4: Y_i <-- X */ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */ blkcpy(&Bout[i * 4 + r * 8], X, 8); } } /* These are tunable */ #define S_BITS 8 #define S_SIMD 2 #define S_P 4 #define S_ROUNDS 6 /* Number of S-boxes. Not tunable, hard-coded in a few places. */ #define S_N 2 /* Derived values. Not tunable on their own. */ #define S_SIZE1 (1 << S_BITS) #define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8) #define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK) #define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD) #define S_P_SIZE (S_P * S_SIMD) #define S_MIN_R ((S_P * S_SIMD + 15) / 16) /** * pwxform(B): * Transform the provided block using the provided S-boxes. 
*/ static void block_pwxform(uint64_t * B, const uint64_t * S) { uint64_t (*X)[S_SIMD] = (uint64_t (*)[S_SIMD])B; const uint8_t *S0 = (const uint8_t *)S; const uint8_t *S1 = (const uint8_t *)(S + S_SIZE1 * S_SIMD); size_t i, j; #if S_SIMD > 2 size_t k; #endif for (j = 0; j < S_P; j++) { uint64_t *Xj = X[j]; uint64_t x0 = Xj[0]; #if S_SIMD > 1 uint64_t x1 = Xj[1]; #endif for (i = 0; i < S_ROUNDS; i++) { uint64_t x = x0 & S_MASK2; const uint64_t *p0, *p1; p0 = (const uint64_t *)(S0 + (uint32_t)x); p1 = (const uint64_t *)(S1 + (x >> 32)); x0 = (uint64_t)(x0 >> 32) * (uint32_t)x0; x0 += p0[0]; x0 ^= p1[0]; #if S_SIMD > 1 x1 = (uint64_t)(x1 >> 32) * (uint32_t)x1; x1 += p0[1]; x1 ^= p1[1]; #endif #if S_SIMD > 2 for (k = 2; k < S_SIMD; k++) { x = Xj[k]; x = (uint64_t)(x >> 32) * (uint32_t)x; x += p0[k]; x ^= p1[k]; Xj[k] = x; } #endif } Xj[0] = x0; #if S_SIMD > 1 Xj[1] = x1; #endif } } /** * blockmix_pwxform(Bin, Bout, S, r): * Compute Bout = BlockMix_pwxform{salsa20/8, S, r}(Bin). The input Bin must * be 128r bytes in length; the output Bout must also be the same size. * * S lacks const qualifier to match blockmix_salsa8()'s prototype, which we * need to refer to both functions via the same function pointers. 
*/ static void blockmix_pwxform(const uint64_t * Bin, uint64_t * Bout, uint64_t * S, size_t r) { size_t r1, r2, i; /* Convert 128-byte blocks to (S_P_SIZE * 64-bit) blocks */ r1 = r * 128 / (S_P_SIZE * 8); /* X <-- B_{r1 - 1} */ blkcpy(Bout, &Bin[(r1 - 1) * S_P_SIZE], S_P_SIZE); /* X <-- X \xor B_i */ blkxor(Bout, Bin, S_P_SIZE); /* X <-- H'(X) */ /* B'_i <-- X */ block_pwxform(Bout, S); /* for i = 0 to r1 - 1 do */ for (i = 1; i < r1; i++) { /* X <-- X \xor B_i */ blkcpy(&Bout[i * S_P_SIZE], &Bout[(i - 1) * S_P_SIZE], S_P_SIZE); blkxor(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], S_P_SIZE); /* X <-- H'(X) */ /* B'_i <-- X */ block_pwxform(&Bout[i * S_P_SIZE], S); } /* Handle partial blocks */ if (i * S_P_SIZE < r * 16) blkcpy(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], r * 16 - i * S_P_SIZE); i = (r1 - 1) * S_P_SIZE / 8; /* Convert 128-byte blocks to 64-byte blocks */ r2 = r * 2; /* B'_i <-- H(B'_i) */ salsa20_8(&Bout[i * 8]); i++; for (; i < r2; i++) { /* B'_i <-- H(B'_i \xor B'_{i-1}) */ blkxor(&Bout[i * 8], &Bout[(i - 1) * 8], 8); salsa20_8(&Bout[i * 8]); } } /** * integerify(B, r): * Return the result of parsing B_{2r-1} as a little-endian integer. */ static inline uint64_t integerify(const uint64_t * B, size_t r) { /* * Our 64-bit words are in host byte order, and word 6 holds the second 32-bit * word of B_{2r-1} due to SIMD shuffling. The 64-bit value we return is also * in host byte order, as it should be. */ const uint64_t * X = &B[(2 * r - 1) * 8]; uint32_t lo = X[0]; uint32_t hi = X[6] >> 32; return ((uint64_t)hi << 32) + lo; } /** * smix1(B, r, N, flags, V, NROM, shared, XY, S): * Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in * length; the temporary storage V must be 128rN bytes in length; the temporary * storage XY must be 256r + 64 bytes in length. The value N must be even and * no smaller than 2. 
*/
/*
 * smix1: first SMix loop -- sequentially fills V[0..N-1] while hashing B.
 * (NOTE(review): the original header comment for this function begins in the
 * preceding chunk; the closing marker above belongs to it.)
 */
static void
smix1(uint64_t * B, size_t r, uint64_t N, yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* With S (PWXFORM S-boxes) present, use the pwxform block mixer;
	 * otherwise plain Salsa20/8 as in classic scrypt. */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 16 * r;	/* block size in 64-bit words (128r bytes) */
	uint64_t * X = V;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t n, i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* Decode little-endian words and shuffle into SIMD layout. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	/* 4: X <-- H(X) */
	/* 3: V_i <-- X */
	blockmix(X, Y, Z, r);
	blkcpy(&V[s], Y, s);

	X = XY;

	if (NROM && (VROM_mask & 1)) {
		/* NOTE(review): this inner test is redundant -- the enclosing
		 * condition already guarantees (VROM_mask & 1) != 0. Kept as-is. */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			/* X <-- H(X \xor VROM_j) */
			blkxor(Y, &VROM[j * s], s);
		}

		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		/* Two iterations per pass; ROM lookups interleaved per VROM_mask. */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			/* n tracks p2floor(i) for the Wrap() index computation. */
			if ((i & (i - 1)) == 0)
				n <<= 1;

			/* j <-- Wrap(Integerify(X), i) */
			j = integerify(X, r) & (n - 1);
			j += i - n;

			/* X <-- X \xor V_j */
			blkxor(X, &V[j * s], s);

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i + 1 - n;

				/* X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		/* No ROM: classic sequential fill, with optional RW back-references. */
		yescrypt_flags_t rw = flags & YESCRYPT_RW;

		/* 4: X <-- H(X) */
		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if (rw) {
				if ((i & (i - 1)) == 0)
					n <<= 1;

				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(X, r) & (n - 1);
				j += i - n;

				/* X <-- X \xor V_j */
				blkxor(X, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			if (rw) {
				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(Y, r) & (n - 1);
				j += (i + 1) - n;

				/* X <-- X \xor V_j */
				blkxor(Y, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(Y, X, Z, r);
		}
	}

	/* B' <-- X */
	/* Unshuffle back out of SIMD layout and re-encode little-endian. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The value Nloop must be even.
 */
static void
smix2(uint64_t * B, size_t r, uint64_t N, uint64_t Nloop,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	/* Bit 0 forced on so ((i + 1) & VROM_mask) == 1 can match below. */
	uint32_t VROM_mask = shared->mask1 | 1;
	size_t s = 16 * r;
	yescrypt_flags_t rw = flags & YESCRYPT_RW;
	uint64_t * X = XY;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t i, j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* 7: j <-- Integerify(X) mod N */
				j &= N - 1;

				/* 8: X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
				/* V_j <-- Xprev \xor V_j */
				if (rw)
					blkcpy(&V[j * s], Y, s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		/* 6: for i = 0 to N - 1 do */
		/* Nloop is even (see header comment), so two steps per pass. */
		i = Nloop / 2;
		do {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			/* 7: j <-- Integerify(X) mod N */
			j = integerify(Y, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(Y, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], Y, s);
			blockmix(Y, X, Z, r);
		} while (--i);
	}

	/* 10: B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 */
static uint64_t
p2floor(uint64_t x)
{
	uint64_t y;
	/* Repeatedly clear the lowest set bit; the last nonzero value is the
	 * highest set bit, i.e. floor to a power of 2. */
	while ((y = x & (x - 1)))
		x = y;
	return x;
}

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage
 * XY must be 256r+64 or (256r+64)*p bytes in length (the larger size is
 * required with OpenMP-enabled builds).  The value N must be a power of 2
 * greater than 1.
 */
static void
smix(uint64_t * B, size_t r, uint64_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	size_t s = 16 * r;
	uint64_t Nchunk = N / p, Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the second-loop iteration count from t (time cost tweak). */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	/* Portion of the second loop run with read-write access to V. */
	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint64_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	/* Each of the p lanes fills and mixes its own chunk of V. */
	for (i = 0; i < p; i++) {
		uint64_t Vchunk = i * Nchunk;
		uint64_t * Bp = &B[i * s];
		uint64_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
		uint64_t * XYp = XY;
#endif
		/* The last lane absorbs the rounding remainder of N / p. */
		uint64_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
		if (Sp)
			/* Initialize this lane's pwxform S-boxes (no pwxform
			 * while doing so, and no S passed down). */
			smix1(Bp, 1, S_SIZE_ALL / 16, flags & ~YESCRYPT_PWXFORM, Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp, NROM, shared, XYp, Sp);
	}
	/* Remaining second-loop iterations run read-only over the whole V
	 * (note: full V is passed here, not the per-lane Vp, and the RW flag
	 * is stripped). */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint64_t * Bp = &B[i * s];
#ifdef _OPENMP
			uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
			uint64_t * XYp = XY;
#endif
			uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw, flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error.
 */
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint64_t * B, * V, * XY, * S;
	uint64_t sha256[4];

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects.  Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (((N & (N - 1)) != 0) || (N <= 1) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 1)) {
		errno = EINVAL;
		return -1;
	}
#if S_MIN_R > 1
	if ((flags & YESCRYPT_PWXFORM) && (r < S_MIN_R)) {
		errno = EINVAL;
		return -1;
	}
#endif
	/* Overflow checks for the size computations below. */
	if ((p > SIZE_MAX / ((size_t)256 * r + 64)) ||
#if SIZE_MAX / 256 <= UINT32_MAX
	    (r > SIZE_MAX / 256) ||
#endif
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
	if (N > UINT64_MAX / ((uint64_t)t + 1)) {
		errno = EFBIG;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / (S_SIZE_ALL * sizeof(*S))) {
		errno = ENOMEM;
		return -1;
	}

	/* A ROM is only usable in RW mode and must itself be a power of 2. */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the caller's region. */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (uint64_t *)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r + 64;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL * sizeof(*S);
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint64_t *)tmp.aligned;
		XY = (uint64_t *)((uint8_t *)B + B_size);
	} else {
		/* Reuse (or grow) the caller's thread-local region: the
		 * layout is B, then V, then XY (then S). */
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		B = (uint64_t *)local->aligned;
		V = (uint64_t *)((uint8_t *)B + B_size);
		XY = (uint64_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint64_t *)((uint8_t *)XY + XY_size);

	/* Any deviation from classic scrypt (t or flags) pre-hashes the
	 * password with SHA-256. */
	if (t || flags) {
		SHA256_CTX ctx;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, passwd, passwdlen);
		SHA256_Final((uint8_t *)sha256, &ctx);
		passwd = (uint8_t *)sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, (uint8_t *)B, B_size);

	if (t || flags)
		blkcpy(sha256, B, sizeof(sha256) / sizeof(sha256[0]));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, &V[(size_t)16 * r * i * N], NROM, shared, &XY[((size_t)32 * r + 8) * i], S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, V, NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, (uint8_t *)B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX ctx;
			HMAC_SHA256_Init(&ctx, buf, buflen);
			HMAC_SHA256_Update(&ctx, "PPTPPubKey", 10);
			HMAC_SHA256_Final((uint8_t *)sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, (uint8_t *)sha256, sizeof(sha256));
			SHA256_Final(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
HDAA_fmt_plug.c
/* HTTP Digest access authentication patch for john
 *
 * Written by Romain Raboin. OMP and intrinsics support by magnum
 *
 * This software is Copyright (c) 2008 Romain Raboin - romain.raboin at
 * gmail.com, and Copyright (c) 2012 magnum and it is hereby released to
 * the general public under the following terms:  Redistribution and
 * use in source and binary forms, with or without modification, are
 * permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_HDAA;
#elif FMT_REGISTERS_H
john_register_one(&fmt_HDAA);
#else

#include <stdint.h>
#include <string.h>

#ifdef __MMX__
#include <mmintrin.h>
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "simd-intrinsics.h"

#define ALGORITHM_NAME			"MD5 " MD5_ALGORITHM_NAME

/* This format is flagged FMT_OMP_BAD; disable OpenMP unless fast formats
 * were explicitly requested. */
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "memdbg.h"

#define FORMAT_LABEL			"hdaa"
#define FORMAT_NAME			"HTTP Digest access authentication"
#define BENCHMARK_COMMENT		""
#define BENCHMARK_LENGTH		0

#define PLAINTEXT_LENGTH		32
#define CIPHERTEXT_LENGTH		32

#define BINARY_SIZE			16
#define BINARY_ALIGN			4
#define SALT_SIZE			sizeof(reqinfo_t)
#define SALT_ALIGN			4

#if defined(_OPENMP)
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE			256
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE			64
#endif
#endif
#endif

#ifdef SIMD_COEF_32
#define NBKEYS	(SIMD_COEF_32 * SIMD_PARA_MD5)
#define MIN_KEYS_PER_CRYPT	NBKEYS
#define MAX_KEYS_PER_CRYPT	NBKEYS
/* Byte offset of plaintext byte i of candidate `index` within the
 * interleaved SIMD input buffer (64-byte MD5 blocks). */
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
/* Same, but within the 16-byte interleaved SIMD output (digest) buffer. */
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&0x1c)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 )
#else
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#endif

#define SEPARATOR		'$'

#define MAGIC			"$response$"
#define MAGIC_LEN		(sizeof(MAGIC)-1)

#define SIZE_TAB		12

// This is 8 x 64 bytes, so in MMX/SSE2 we support up to 9 limbs of MD5
#define HTMP			512

/* Per-salt scratch: the fixed prefixes of the h1 and h3 MD5 inputs. */
typedef struct {
	size_t	h1tmplen;
	size_t	h3tmplen;
	char	h1tmp[HTMP];
	char	h3tmp[HTMP];
} reqinfo_t;

/*
  digest authentication scheme :
  h1 = md5(user:realm:password)
  h2 = md5(method:digestURI)
  response = h3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2)
*/

/* request information */
enum e_req {
	R_RESPONSE,
	R_USER,
	R_REALM,
	R_METHOD,
	R_URI,
	R_NONCE,
	R_NONCECOUNT,
	R_CLIENTNONCE,
	R_QOP
};

/* response:user:realm:method:uri:nonce:nonceCount:ClientNonce:qop */
static struct fmt_tests tests[] = {
	{"$response$679066476e67b5c7c4e88f04be567f8b$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$00000001$4b61913cec32e2c9$auth", "nocode"},
	{"$response$faa6cb7d676e5b7c17fcbf966436aa0c$moi$myrealm$GET$/$af32592775d27b1cd06356b3a0db9ddf$00000001$8e1d49754a25aea7$auth", "kikou"},
	{"$response$56940f87f1f53ade8b7d3c5a102c2bf3$usrx$teN__chars$GET$/4TLHS1TMN9cfsbqSUAdTG3CRq7qtXMptnYfn7mIIi3HRKOMhOks56e$2c0366dcbc$00000001$0153$auth", "passWOrd"},
	{"$response$8663faf2337dbcb2c52882807592ec2c$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$", "pass"},
	{"$response$8663faf2337dbcb2c52882807592ec2c$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a", "pass"},
	{NULL}
};

/* used by set_key */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];

#ifdef SIMD_COEF_32

#define LIMBS	9
static unsigned char *saved_key[LIMBS];
static unsigned int *interm_key;
static unsigned int *crypt_key;

#else

static int (*saved_len);
static unsigned char (*crypt_key)[BINARY_SIZE];

#endif

/* Store information about the request ()*/
static reqinfo_t *rinfo = NULL;

/* Allocate per-candidate buffers; scales key counts by OMP thread count. */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	for (i = 0; i < LIMBS; i++)
		saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt, 64, MEM_ALIGN_SIMD);
	interm_key = mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD);
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
}

/* Release everything allocated by init(). */
static void done(void)
{
#ifdef SIMD_COEF_32
	int i;
#endif
	MEM_FREE(saved_plain);
	MEM_FREE(crypt_key);
#ifdef SIMD_COEF_32
	MEM_FREE(interm_key);
	for (i = 0; i < LIMBS; i++)
		MEM_FREE(saved_key[i]);
#else
	MEM_FREE(saved_len);
#endif
}

/* Validate a ciphertext line: magic prefix, a 32-char lowercase-hex hash,
 * then either the legacy 5-field form or the full form ending in qop. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;

	if (strncmp(ciphertext, MAGIC, MAGIC_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += MAGIC_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */
		goto err;
	if (!ishexlc(p) || strlen(p) != 32)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* user */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* realm */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* method */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* uri */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* nonce */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* End of legacy HDAA or noncecount */
		goto end_hdaa_legacy;
	if ((p = strtokm(NULL, "$")) == NULL) /* clientnonce */
		goto err;
	if (!ishexlc(p) )
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* qop */
		goto err;
	if ((p = strtokm(NULL, "$")) != NULL)
		goto err;
end_hdaa_legacy:
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

// Normalize shorter hashes, to allow with or without trailing '$' character.
static char *split(char *ciphertext, int index, struct fmt_main *self) { char *cp; if (strncmp(ciphertext, MAGIC, MAGIC_LEN)) return ciphertext; cp = ciphertext + MAGIC_LEN; cp = strchr(cp, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; cp = strchr(cp+1, '$'); if (!cp) return ciphertext; // now if we have $binary_hash$ then we remove the last '$' char if (strlen(cp) == 1 + BINARY_SIZE*2 + 1) { static char out[256]; strnzcpy(out, ciphertext, sizeof(out)); out[strlen(out)-1] = 0; return out; } return ciphertext; } static void set_salt(void *salt) { rinfo = salt; } static void set_key(char *key, int index) { strcpy(saved_plain[index], key); #ifndef SIMD_COEF_32 saved_len[index] = -1; #endif } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x,y=0; #ifdef _OPENMP for (; y < SIMD_PARA_MD5 * omp_t; y++) #else for (; y < SIMD_PARA_MD5; y++) #endif for (x = 0; x < SIMD_COEF_32; x++) { if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x] ) return 1; } return 0; #else int index; for (index = 0; index < count; index++) if (!(memcmp(binary, crypt_key[index], BINARY_SIZE))) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int i,x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; for (i=0;i<(BINARY_SIZE/4);i++) if ( ((uint32_t*)binary)[i] != ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] ) return 0; return 1; #else return !(memcmp(binary, crypt_key[index], BINARY_SIZE)); #endif } static int cmp_exact(char *source, int index) { return 1; } /* convert hash from binary to ascii */ #ifdef SIMD_COEF_32 // This code should be rewritten in intrinsics, reading from // MMX or SSE2 output buffers and writing to MMX/SSE2 input buffers. 
/* Hex-encode the NBKEYS interleaved MD5 digests straight into the
 * interleaved SIMD input buffer.  Each pass converts 2 digest bytes into
 * 4 ASCII hex digits using the branch-free nibble+0x27/0x2a trick. */
inline static void sse_bin2ascii(unsigned char *conv, unsigned char *src)
{
	unsigned int index;

	for (index = 0; index < NBKEYS; index++) {
		unsigned int i, j = 0;
		for (i = 0; i < BINARY_SIZE; i += 2) {
			unsigned int t;

			t = (src[GETOUTPOS((i + 1), index)] & 0x0f);
			t <<= 12;
			t |= (src[GETOUTPOS((i + 1), index)] & 0xf0);
			t <<= 4;
			t |= (src[GETOUTPOS(i, index)] & 0x0f);
			t <<= 8;
			t |= ((src[GETOUTPOS(i, index)] & 0xf0) >> 4);
			/* Map each nibble to '0'-'9'/'a'-'f' without branches. */
			t += 0x06060606;
			t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a);
			*(unsigned int*)&conv[GETPOS(j, index)] = t;
			j+=4;
		}
	}
}
#endif /* SIMD_COEF_32 */

#ifdef __MMX__
/* MMX variant of the binary-to-hex conversion (16 bytes -> 32 hex chars). */
inline static void bin2ascii(__m64 *conv, __m64 *src)
{
	unsigned int i = 0;

	while (i != 4) {
		__m64 l;
		__m64 r;
		__m64 t;
		__m64 u;
		__m64 v;

		/* 32 bits to 64 bits */
		t = _mm_set1_pi32(0x0f0f0f0f);

		/* Bit-wise AND the 64-bit values in M1 and M2. */
		u = _mm_and_si64(_mm_srli_si64(src[(i / 2)], 4), t);
		v = _mm_and_si64(src[(i / 2)], t);

		/* interleaving */
		l = _mm_unpacklo_pi8(u, v);
		r = _mm_unpackhi_pi8(u, v);

		t = _mm_set1_pi32(0x06060606);
		l = _mm_add_pi32(l, t);
		r = _mm_add_pi32(r, t);

		t = _mm_set1_pi32(0x01010101);
		/* u = (l << 4) & t */
		u = _mm_and_si64(_mm_srli_si64(l, 4), t);
		/* v = (r << 4) & t */
		v = _mm_and_si64(_mm_srli_si64(r, 4), t);

		t = _mm_set1_pi32(0x00270027);
		/* Multiply four 16-bit values in M1 by four 16-bit values in M2
		   and produce the low 16 bits of the results. */
		u = _mm_mullo_pi16(u, t);
		v = _mm_mullo_pi16(v, t);

		t = _mm_set1_pi32(0x2a2a2a2a);
		u = _mm_add_pi32(u, t);
		v = _mm_add_pi32(v, t);

		conv[(i++)] = _mm_add_pi32(l, u);
		conv[(i++)] = _mm_add_pi32(r, v);
	}
	/* Leave MMX state so subsequent x87 FP code is safe. */
	__asm__ __volatile__("emms");
}
#else
/* Portable scalar variant; handles both endiannesses. */
inline static void bin2ascii(uint32_t *conv, uint32_t *source)
{
	unsigned char *src = (unsigned char*)source;
	unsigned int i;
	unsigned int j = 0;
	uint32_t t = 0;

	for (i = 0; i < BINARY_SIZE; i += 2) {
#if (ARCH_LITTLE_ENDIAN == 0)
		t = (src[i] & 0xf0);
		t *= 0x10;
		t += (src[i] & 0x0f);
		t *= 0x1000;
		t += (src[(i + 1)] & 0xf0);
		t *= 0x10;
		t += (src[(i + 1)] & 0x0f);
#else
		t = (src[(i + 1)] & 0x0f);
		t *= 0x1000;
		t += (src[(i + 1)] & 0xf0);
		t *= 0x10;
		t += (src[i] & 0x0f);
		t *= 0x100;
		t += ((src[i] & 0xf0) >> 4);
#endif
		/* Branch-free nibble -> hex-digit mapping, as in sse_bin2ascii. */
		t += 0x06060606;
		t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a);
		conv[(j++)] = t;
	}
}
#endif /* MMX */

#if SIMD_COEF_32
/* Copy one finished 16-byte digest from the interleaved intermediate
 * buffer to the interleaved final crypt_key buffer. */
inline static void crypt_done(unsigned const int *source, unsigned int *dest, int index)
{
	unsigned int i;
	unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32];
	unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32];

	for (i = 0; i < BINARY_SIZE / 4; i++) {
		*d = *s;
		s += SIMD_COEF_32;
		d += SIMD_COEF_32;
	}
}
#endif

/* Compute h1 = md5(user:realm:password) and then the response
 * h3 = md5(hex(h1):nonce[:nc:cnonce:qop]:hex(h2)) for `count` candidates.
 * The SIMD path builds interleaved buffers, runs the first limb, hex-encodes
 * h1 in place, then processes as many additional 64-byte limbs as the
 * longest h3 input needs, harvesting digests as soon as they complete. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#if SIMD_COEF_32
#if defined(_OPENMP)
#define ti (thread*NBKEYS+index)
	int thread;

#pragma omp parallel for
	for (thread = 0; thread < (count+NBKEYS-1)/NBKEYS; thread++)
#else
#define thread 0
#define ti index
#endif
	{
		static unsigned int crypt_len[NBKEYS];
		unsigned int index, i, shortest, longest;

		for (index = 0; index < NBKEYS; index++) {
			int len;
			char temp;
			const char *key;

			/* Build h1 input: salt prefix "user:realm:" ... */
			key = rinfo->h1tmp;
			for (len = 0; len < rinfo->h1tmplen; len += 4, key += 4)
				*(uint32_t*)&saved_key[len>>6][GETPOS(len, ti)] = *(uint32_t*)key;
			len = rinfo->h1tmplen;
			/* ... followed by the candidate password. */
			key = (char*)&saved_plain[ti];
			while((temp = *key++)) {
				saved_key[len>>6][GETPOS(len, ti)] = temp;
				len++;
			}
			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;
			for (; i < (((len+8)>>6)+1)*64; i += 4)
				*(uint32_t*)&saved_key[i>>6][GETPOS(i, ti)] = 0;

			/* Store the MD5 length field (bits) in the final limb. */
			((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3;
		}

		SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &crypt_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN);
		/* hex(h1) becomes the first 32 bytes of the h3 input. */
		sse_bin2ascii((unsigned char*)&saved_key[0][thread*64*NBKEYS], (unsigned char*)&crypt_key[thread*4*NBKEYS]);

		longest = 0; shortest = HTMP;
		for (index = 0; index < NBKEYS; index++) {
			const char *key;
			int i, len;

			len = CIPHERTEXT_LENGTH - 1;
			key = rinfo->h3tmp + CIPHERTEXT_LENGTH;

			// Copy a char at a time until aligned at destination
			while (++len & 3)
				saved_key[len>>6][GETPOS(len, ti)] = *key++;

			// ...then a word at a time. This is a good boost, we are copying over 100 bytes.
			for (;len < rinfo->h3tmplen; len += 4, key += 4)
				*(uint32_t*)&saved_key[len>>6][GETPOS(len, ti)] = *(uint32_t*)key;
			len = rinfo->h3tmplen;
			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;
			//for (; i < (((len+8)>>6)+1)*64; i += 4)
			/* Only clear up to the previous salt's length for this slot. */
			for (; i <= crypt_len[index]; i += 4)
				*(uint32_t*)&saved_key[i>>6][GETPOS(i, ti)] = 0;

			((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3;
			crypt_len[index] = len;
			if (len > longest)
				longest = len;
			if (len < shortest)
				shortest = len;
		}

		// First limb
		SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN);
		// Copy any output that is done now
		if (shortest < 56) {
			if (longest < 56)
				memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS);
			else
				for (index = 0; index < NBKEYS; index++)
					if (crypt_len[index] < 56)
						crypt_done(interm_key, crypt_key, ti);
		}
		// Do the rest of the limbs
		for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
			SIMDmd5body(&saved_key[i][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], SSEi_RELOAD|SSEi_MIXED_IN);
			// Copy any output that is done now
			if (shortest < i*64+56) {
				if (shortest > (i-1)*64+55 && longest < i*64+56)
					memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS);
				else
					for (index = 0; index < NBKEYS; index++)
						if (((crypt_len[index] + 8) >> 6) == i)
							crypt_done(interm_key, crypt_key, ti);
			}
		}
	}
#undef thread
#undef ti
#else

	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		int len;
#ifdef _OPENMP
		/* Private copies: the shared rinfo buffers must not be written
		 * concurrently. */
		char h3tmp[HTMP];
		char h1tmp[HTMP];
#else
		char *h3tmp;
		char *h1tmp;
#endif
		size_t tmp;
#ifdef __MMX__
		__m64 h1[BINARY_SIZE / sizeof(__m64)];
		__m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1];
#else
		uint32_t h1[BINARY_SIZE / sizeof(uint32_t)];
		uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1];
#endif

		tmp = rinfo->h1tmplen;
		if ((len = saved_len[index]) < 0)
			len = saved_len[index] = strlen(saved_plain[index]);
#ifdef _OPENMP
		memcpy(h1tmp, rinfo->h1tmp, tmp);
		memcpy(h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmplen - CIPHERTEXT_LENGTH);
#else
		h3tmp = rinfo->h3tmp;
		h1tmp = rinfo->h1tmp;
#endif
		memcpy(&h1tmp[tmp], saved_plain[index], len);

		/* h1 = md5(user:realm:password) */
		MD5_Init(&ctx);
		MD5_Update(&ctx, h1tmp, len + tmp);
		MD5_Final((unsigned char*)h1, &ctx);
		bin2ascii(conv, h1);

		/* h3 input starts with hex(h1). */
		memcpy(h3tmp, conv, CIPHERTEXT_LENGTH);

		MD5_Init(&ctx);
		MD5_Update(&ctx, h3tmp, rinfo->h3tmplen);
		MD5_Final(crypt_key[index], &ctx);
	}
#endif
	return count;
}

/* Duplicate at most n bytes of s into freshly allocated, NUL-terminated
 * storage (strndup() workalike on top of mem_alloc()). */
static char *mystrndup(const char *s, size_t n)
{
	size_t tmp;
	size_t size;
	char *ret;

	for (tmp = 0; s[tmp] != 0 && tmp <= n; tmp++);
	size = n;
	if (tmp < size)
		size = tmp;
	if ((ret = mem_alloc(sizeof(char) * size + 1)) == NULL)
		return NULL;
	memmove(ret, s, size);
	ret[size] = 0;
	return ret;
}

/* Length of str up to (but not including) the next '$' separator or NUL. */
static size_t reqlen(char *str)
{
	size_t len;
	for (len = 0; str[len] != 0
&& str[len] != SEPARATOR; len++); return len; } static void *get_salt(char *ciphertext) { int nb; int i; char *request[SIZE_TAB]; char *str; static reqinfo_t *r; #ifdef __MMX__ __m64 h2[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else unsigned int h2[BINARY_SIZE / sizeof(unsigned int)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif MD5_CTX ctx; /* parse the password string */ if (!r) r = mem_alloc_tiny(sizeof(*r), MEM_ALIGN_WORD); memset(r, 0, sizeof(*r)); for (nb = 0, i = 1; ciphertext[i] != 0; i++) { if (ciphertext[i] == SEPARATOR) { i++; request[nb] = mystrndup(&ciphertext[i], reqlen(&ciphertext[i])); nb++; if (!ciphertext[i]) break; } } while (nb < SIZE_TAB) { request[nb++] = NULL; } /* calculate h2 (h2 = md5(method:digestURI))*/ str = mem_alloc(strlen(request[R_METHOD]) + strlen(request[R_URI]) + 2); sprintf(str, "%s:%s", request[R_METHOD], request[R_URI]); MD5_Init(&ctx); MD5_Update(&ctx, str, strlen(str)); MD5_Final((unsigned char*)h2, &ctx); memset(conv, 0, CIPHERTEXT_LENGTH + 1); bin2ascii(conv, h2); MEM_FREE(str); /* create a part of h1 (h1tmp = request:realm:)*/ snprintf(r->h1tmp, HTMP - PLAINTEXT_LENGTH, "%s:%s:", request[R_USER], request[R_REALM]); /* create a part of h3 (h3tmp = nonce:noncecount:clientnonce:qop:h2)*/ if (request[R_CLIENTNONCE] == NULL) snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s", request[R_NONCE], (char*)conv); else snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s:%s:%s:%s", request[R_NONCE], request[R_NONCECOUNT], request[R_CLIENTNONCE], request[R_QOP], (char*)conv); r->h1tmplen = strlen(r->h1tmp); r->h3tmplen = strlen(&r->h3tmp[CIPHERTEXT_LENGTH]) + CIPHERTEXT_LENGTH; for (nb=0; nb < SIZE_TAB; ++nb) { MEM_FREE(request[nb]); } return r; } /* convert response to binary form */ static void *get_binary(char *ciphertext) { static unsigned int realcipher[BINARY_SIZE / sizeof(int)]; int i; ciphertext += 10; for (i = 0; i < 
BINARY_SIZE; i++) {
		/* two lowercase-hex digits per output byte */
		((unsigned char*)realcipher)[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}
	return (void*) realcipher;
}

#ifdef SIMD_COEF_32
/* Word offset of candidate `index`'s first digest word in the interleaved
 * SIMD crypt_key buffer. */
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t *)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(uint32_t*)&crypt_key[index] & PH_MASK_6; }
#endif

/* Format descriptor registered with John the Ripper. */
struct fmt_main fmt_HDAA = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		{ MAGIC },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
tutorial_region_prof.c
/* * Copyright (c) 2015, 2016, 2017, 2018, 2019, 2020, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <stdint.h> #include <mpi.h> #ifdef _OPENMP #include <omp.h> #endif #include <geopm.h> #include "tutorial_region.h" #ifdef _OPENMP static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; int err = 0; #pragma omp parallel for for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } geopm_tprof_post(); } #pragma omp parallel for for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } return err; } #endif static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; geopm_tprof_init(num_block); for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } geopm_tprof_post(); } for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } return 0; } int tutorial_stream_profiled(double big_o, int do_report) { int err = 0; if (big_o != 0.0) { size_t cline_size = 64; size_t num_stream = (size_t)big_o * 500000000; size_t mem_size = sizeof(double) * num_stream; double *a = NULL; double *b = NULL; double *c = NULL; double scalar = 3.0; uint64_t stream_rid; if (!err) { err = geopm_prof_region("tutorial_stream", GEOPM_REGION_HINT_MEMORY, &stream_rid); } err = posix_memalign((void *)&a, cline_size, mem_size); if (!err) { err = posix_memalign((void *)&b, cline_size, mem_size); } if (!err) { err = posix_memalign((void *)&c, 
cline_size, mem_size); } if (!err) { #pragma omp parallel for for (int i = 0; i < num_stream; i++) { a[i] = 0.0; b[i] = 1.0; c[i] = 2.0; } if (do_report) { printf("Executing profiled STREAM triad on length %d vectors.\n", num_stream); fflush(stdout); } err = geopm_prof_enter(stream_rid); } if (!err) { #ifdef _OPENMP err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c); #else err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c); #endif } if (!err) { err = geopm_prof_exit(stream_rid); } if (!err) { free(c); free(b); free(a); } } }
mmul_omp.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include <stdlib.h>
#include "sptensor.h"

/*
 * OpenMP-parallel sparse-tensor-times-dense-matrix product along one mode.
 *
 * Computes Y = X x_mode U: each mode-`mode` fiber of sparse tensor X is
 * contracted with matrix U, producing semi-sparse tensor Y whose dense
 * dimension at `mode` has size U->ncols.  X is sorted at `mode` as a side
 * effect (sptSparseTensorSortIndexAtMode below).
 *
 * Y    - output semi-sparse tensor, allocated here via sptNewSemiSparseTensor.
 * X    - input sparse tensor; ndims[mode] must equal U->nrows.
 * U    - dense matrix (row-major with row stride U->stride).
 * mode - contraction mode; must be < X->nmodes.
 *
 * Returns 0 on success; error macros return early on shape mismatch,
 * allocation failure, or tensor-construction failure.
 *
 * NOTE(review): gettimeofday()/struct timeval and printf() require
 * <sys/time.h> and <stdio.h>; presumably these arrive via <ParTI.h> —
 * confirm, since they are not included here directly.
 */
int sptOmpSparseTensorMulMatrix(sptSemiSparseTensor *Y, sptSparseTensor *X, const sptMatrix *U, sptIndex const mode) {
    int result;
    sptIndex *ind_buf;
    sptIndex m;
    sptNnzIndexVector fiberidx;
    /* The contraction mode must be a valid mode of X. */
    if(mode >= X->nmodes) {
        spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP SpTns * Mtx", "shape mismatch");
    }
    /* Inner dimensions must agree: X's size at `mode` vs U's row count. */
    if(X->ndims[mode] != U->nrows) {
        spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP SpTns * Mtx", "shape mismatch");
    }
    /* Group the nonzeros of X into mode-`mode` fibers. */
    sptSparseTensorSortIndexAtMode(X, mode, 0, 1);
    /* Y has X's shape except the contracted mode becomes U->ncols wide. */
    ind_buf = malloc(X->nmodes * sizeof *ind_buf);
    spt_CheckOSError(!ind_buf, "OMP SpTns * Mtx");
    for(m = 0; m < X->nmodes; ++m) {
        ind_buf[m] = X->ndims[m];
    }
    ind_buf[mode] = U->ncols;
    result = sptNewSemiSparseTensor(Y, X->nmodes, mode, ind_buf);
    free(ind_buf);
    spt_CheckError(result, "OMP SpTns * Mtx", NULL);
    /* fiberidx.data[i] .. fiberidx.data[i+1] delimits the nonzeros of X
       that belong to Y's i-th fiber. */
    sptSemiSparseTensorSetIndices(Y, &fiberidx, X);

    sptTimer timer;
    sptNewTimer(&timer, 0);
    sptStartTimer(timer);

    struct timeval start_t, end_t; /* wall-clock timestamps for the kernel */
    gettimeofday(&start_t, NULL);  /* record start time */
    /* One parallel task per output fiber; fibers are independent, so no
       synchronization is needed on Y. */
    #pragma omp parallel for
    for(sptNnzIndex i = 0; i < Y->nnz; ++i) {
        sptNnzIndex inz_begin = fiberidx.data[i];
        sptNnzIndex inz_end = fiberidx.data[i+1];
        // jli: exchange two loops
        for(sptNnzIndex j = inz_begin; j < inz_end; ++j) {
            sptIndex r = X->inds[mode].data[j];
            /* Accumulate row r of U, scaled by the nonzero value, into
               Y's i-th dense fiber. */
            for(sptIndex k = 0; k < U->ncols; ++k) {
                Y->values.values[i*Y->stride + k] += X->values.data[j] * U->values[r*U->stride + k];
            }
        }
    }
    /* end of the timed code block */
    gettimeofday(&end_t, NULL);    /* record end time */
    double total_t_sec, total_t_usec, total_t; /* elapsed-time parts */
    total_t_sec = (double)(end_t.tv_sec - start_t.tv_sec);    /* seconds */
    total_t_usec = (double)(end_t.tv_usec - start_t.tv_usec); /* microseconds */
    total_t = total_t_sec + total_t_usec / 1000000.0;         /* total time */
    printf("cost time:%lf \n",total_t);

    sptStopTimer(timer);
    // sptPrintElapsedTime(timer, "OMP SpTns * Mtx");
    sptFreeTimer(timer);

    sptFreeNnzIndexVector(&fiberidx);
    return 0;
}
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),   /* weighting function */
    (*window)(const double,const ResizeFilter *),   /* windowing function */
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;

  size_t
    signature;
};

/*
  Forward declarations.
*/
static double
  I0(double x),
  BesselOrderOne(double),
  Sinc(const double, const ResizeFilter *),
  SincFast(const double, const ResizeFilter *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F i l t e r   F u n c t i o n s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  These are the various filter and windowing functions that are provided.
%
%  They are internal to this module only.  See AcquireResizeFilterInfo() for
%  details of the access to these functions, via the GetResizeFilterSupport()
%  and GetResizeFilterWeight() API interface.
%
%  The individual filter functions have this format...
%
%     static MagickRealType *FilterName(const double x,const double support)
%
%  A description of each parameter follows:
%
%    o x: the distance from the sampling point generally in the range of 0 to
%      support.  The GetResizeFilterWeight() ensures this is a positive value.
%
%    o resize_filter: current filter information.  This allows function to
%      access support, and possibly other pre-calculated information defining
%      the functions.
%
*/

static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)

    Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
    five flops.  (Uses cos(2 pi x) = 2 cos^2(pi x) - 1 to collapse the sum
    to 0.34 + cos(pi x)*(0.5 + 0.16 cos(pi x)).)
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cosine*(0.5+cosine*0.16));
}

static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0
    (so that we know that sin(pi x) >= 0).
  */
  const double cosine = cos((double) (MagickPI*x));
  const double sine=sqrt(1.0-cosine*cosine);
  magick_unreferenced(resize_filter);
  return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}

static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter is a equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  return(cos((double) (MagickPI2*x)));
}

static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /* Piecewise-polynomial spline; the lobe count is selected by the
     filter's support size (<=2, <=3, else 4 lobes). */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}

static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is
    used as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is need, the others are informative only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cosine);
}

static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
      .54 + .46 cos(pi x).
  */
  const double cosine = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cosine);
}

static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".
  */
  if (x == 0.0)
    return(0.5*MagickPI);
  return(BesselOrderOne(MagickPI*x)/x);
}

static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing)

       I0( beta * sqrt( 1-x^2) ) / IO(0)

    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
    However it is typically defined in terms of Alpha*PI

    The normalization factor (coeff[1]) is not actually needed, but
    without it the filters has a large value at x=0 making it difficult to
    compare the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc:
      N is the 'order' of the lagrange function and depends on
      the overall support window size of the filter. That is: for
      a support of 2, it gives a lagrange-4 (piecewise cubic function).

      "n" identifies the piece of the piecewise polynomial.

      See Survey: Interpolation Methods, IEEE Transactions on Medical
      Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on
      p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  if (x != 0.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  return((double) 1.0);
}

static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor
    polynomials / Pade approximants, the approximations are computed with a
    completely different technique.

    Summary: These approximations are "the best" in terms of bang
    (accuracy) for the buck (flops). More specifically: Among the
    polynomial quotients that can be computed using a fixed number of flops
    (with a given "+ - * / budget"), the chosen polynomial quotient is the
    one closest to the approximated function with respect to maximum
    absolute relative error over the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction: http://www.boost.org/doc/libs/
    1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig
    formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Spline Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Bohman Lanczos % Hann Hamming Cosine % Kaiser Welch Parzen % Bartlett % % Special Purpose Filters % Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp % Robidoux RobidouxSharp % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximately 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). 
% The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of Mitchell and Lanczos2Sharp. % % RobidouxSharp is a slightly sharper version of Robidoux, some believe it % is too sharp. It is designed to minimize the maximum possible change in % a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op % conditions. Amazingly Mitchell falls roughly between Robidoux and % RobidouxSharp, though this seems to have been pure coincidence. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resized or distorted) image. 
% % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. % % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specified % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a % simpler method of setting filter support size that will correctly % handle the Sinc/Jinc switch for an operators filtering requirements. % Only integers should be given. % % "filter:support" Set the support size for filtering to the size given. % This not recommended for Sinc/Jinc windowed filters (lobes should be % used instead). This will override any 'filter:lobes' option. % % "filter:win-support" Scale windowing function to this size instead. This % causes the windowing (or self-windowing Lagrange filter) to act is if % the support window it much much larger than what is actually supplied % to the calling operator. The filter however is still clipped to the % real support size given, by the support range supplied to the caller. % If unset this will equal the normal filter support size. 
% % "filter:blur" Scale the filter and support window by this amount. A value % of > 1 will generally result in a more blurred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterType filter_type,const MagickBooleanType cylindrical, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filter: the filter type, defining a preset filter, window and support. % The artifact settings listed above will override those selections. % % o blur: blur the filter by this amount, use 1.0 if unknown. Image % artifact "filter:blur" will override this API call usage, including any % internal change (such as for cylindrical usage). % % o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial) % filter (Jinc). % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image, const FilterType filter,const MagickBooleanType cylindrical, ExceptionInfo *exception) { const char *artifact; FilterType filter_type, window_type; double B, C, value; ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was specifically requested by the user. WARNING: The order of this table must match the order of the FilterType enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" expert setting. */ static struct { FilterType filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { LanczosFilter, WelchFilter }, /* 
Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */ { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { double (*function)(const double,const ResizeFilter*), support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. 
*/ ResizeWeightingFunctionType weightingFunctionType; } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */ { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */ { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */ { Triangle, 
1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction }, /* RobidouxSharp: Sharper version of Robidoux */ { CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction }, { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */ { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. 
*/ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) exception; resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter)); (void) memset(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. */ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur=1.0; /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) && (filter != SincFastFilter)) filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */ /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (IsStringTrue(artifact) != MagickFalse) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterType) option; window_type=BoxFilter; } /* Filter override with a specific window function. 
*/ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterType) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type= cylindrical != MagickFalse ? JincFilter : SincFastFilter; window_type=(FilterType) option; } } } /* Assign the real functions to use for the filters selected. */ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].support; resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType; resize_filter->window=filters[window_type].function; resize_filter->windowWeightingType=filters[window_type].weightingFunctionType; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickCoreSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(double) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: case LanczosRadiusFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= 0.9549963639785485; break; /* case 
LanczosRadius: blur adjust is done after lobes */ default: break; } /* Expert Option Modifications. */ /* User Gaussian Sigma Override - no support change */ if ((resize_filter->filter == Gaussian) || (resize_filter->window == Gaussian) ) { value=0.5; /* guassian sigma default, half pixel */ artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=value; /* note sigma too */ resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */ resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! */ if ( value > 0.5 ) resize_filter->support *= 2*value; /* increase support linearly */ } /* User Kaiser Alpha Override - no support change */ if ((resize_filter->filter == Kaiser) || (resize_filter->window == Kaiser) ) { value=6.5; /* default beta value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */ if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-beta"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-alpha"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL)*MagickPI; /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* alpha */ resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */ } /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(double) lobes; } if (resize_filter->filter == Jinc) { /* Convert a Jinc function lobes value to a real support 
value. */ if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support=jinc_zeros[((long) resize_filter->support)-1]; /* Blur this filter so support is a integer value (lobes dependant). */ if (filter_type == LanczosRadiusFilter) resize_filter->blur*=floor(resize_filter->support)/ resize_filter->support; } /* Expert blur override. */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(double) MagickEpsilon; /* Expert override of the support setting. */ artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale*=PerceptibleReciprocal(resize_filter->window_support); /* Set Cubic Spline B,C values, calculate Cubic coefficients. */ B=0.0; C=0.0; if ((resize_filter->filter == CubicBC) || (resize_filter->window == CubicBC) ) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. 
*/ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } { const double twoB = B+B; /* Convert B,C values into Cubic Coefficents. See CubicBC(). */ resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. */ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. 
*/ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout, "# Resampling Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions,window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# window-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n", GetMagickPrecision(),(double) resize_filter->blur); if ((filter_type == GaussianFilter) || (window_type == GaussianFilter)) (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); if ( filter_type == KaiserFilter || window_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n", GetMagickPrecision(), (double) support); if ((filter_type == CubicFilter) || (window_type == CubicFilter)) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter result. */ for (x=0.0; x <= support; x+=0.01f) (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x, GetMagickPrecision(),(double) GetResizeFilterWeight(resize_filter,x)); /* A final value so gnuplot can graph the 'stop' properly. 
  */
      (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
        GetMagickPrecision(),0.0);
    }
  /*
    Output the above once only for each image - remove setting
  */
  (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  Image
    *resize_image;

  /* Thin wrapper: delegate to the mesh-interpolated resize path. */
  resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order one.  This is used to create the Jinc() filter function below.
% % Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8] % % j1(x) = x*j1(x); % % For x in (8,inf) % % j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1)) % % where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow: % % cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4) % = 1/sqrt(2) * (sin(x) - cos(x)) % sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4) % = -1/sqrt(2) * (sin(x) + cos(x)) % % The format of the BesselOrderOne method is: % % double BesselOrderOne(double x) % % A description of each parameter follows: % % o x: double value. % */ #undef I0 static double I0(double x) { double sum, t, y; ssize_t i; /* Zeroth order Bessel function of the first kind. */ sum=1.0; y=x*x/4.0; t=y; for (i=2; t > MagickEpsilon; i++) { sum+=t; t*=y/((double) i*i); } return(sum); } #undef J1 static double J1(double x) { double p, q; ssize_t i; static const double Pone[] = { 0.581199354001606143928050809e+21, -0.6672106568924916298020941484e+20, 0.2316433580634002297931815435e+19, -0.3588817569910106050743641413e+17, 0.2908795263834775409737601689e+15, -0.1322983480332126453125473247e+13, 0.3413234182301700539091292655e+10, -0.4695753530642995859767162166e+7, 0.270112271089232341485679099e+4 }, Qone[] = { 0.11623987080032122878585294e+22, 0.1185770712190320999837113348e+20, 0.6092061398917521746105196863e+17, 0.2081661221307607351240184229e+15, 0.5243710262167649715406728642e+12, 0.1013863514358673989967045588e+10, 0.1501793594998585505921097578e+7, 0.1606931573481487801970916749e+4, 0.1e+1 }; p=Pone[8]; q=Qone[8]; for (i=7; i >= 0; i--) { p=p*x*x+Pone[i]; q=q*x*x+Qone[i]; } return(p/q); } #undef P1 static double P1(double x) { double p, q; ssize_t i; static const double Pone[] = { 0.352246649133679798341724373e+5, 0.62758845247161281269005675e+5, 0.313539631109159574238669888e+5, 0.49854832060594338434500455e+4, 0.2111529182853962382105718e+3, 0.12571716929145341558495e+1 }, Qone[] = { 0.352246649133679798068390431e+5, 0.626943469593560511888833731e+5, 
0.312404063819041039923015703e+5, 0.4930396490181088979386097e+4, 0.2030775189134759322293574e+3, 0.1e+1 }; p=Pone[5]; q=Qone[5]; for (i=4; i >= 0; i--) { p=p*(8.0/x)*(8.0/x)+Pone[i]; q=q*(8.0/x)*(8.0/x)+Qone[i]; } return(p/q); } #undef Q1 static double Q1(double x) { double p, q; ssize_t i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p=Pone[5]; q=Qone[5]; for (i=4; i >= 0; i--) { p=p*(8.0/x)*(8.0/x)+Pone[i]; q=q*(8.0/x)*(8.0/x)+Qone[i]; } return(p/q); } static double BesselOrderOne(double x) { double p, q; if (x == 0.0) return(0.0); p=x; if (x < 0.0) x=(-x); if (x < 8.0) return(p*J1(x)); q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin(x)- cos(x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin(x)+cos(x)))); if (p < 0.0) q=(-q); return(q); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y R e s i z e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyResizeFilter() destroy the resize filter. % % The format of the DestroyResizeFilter method is: % % ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter) % % A description of each parameter follows: % % o resize_filter: the resize filter. 
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Invalidate the signature before freeing so stale pointers are caught. */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Accessor: the filter's coefficient array (B,C cubics, Gaussian sigma...). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Accessor: the blur (sharpen/blur scaling) factor. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Accessor: the windowing-function scale factor. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Accessor: the support of the windowing function. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Accessor: the weighting-function type of the filter function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Accessor: the weighting-function type of the window function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Practical support: the declared support enlarged by the blur factor. */
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  x_blur=fabs((double) x)*PerceptibleReciprocal(resize_filter->blur);  /* X offset with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point or Box Filter -- avoid division by zero */
  else
    {
      /* Evaluate the window at the blur-scaled offset, then weight by it. */
      scale=resize_filter->scale;
      scale=resize_filter->window(x_blur*scale,resize_filter);
    }
  weight=scale*resize_filter->filter(x_blur,resize_filter);
  return(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t i v e R e s i z e I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolativeResizeImage() resizes an image using the specified
%  interpolation method.
%
%  The format of the InterpolativeResizeImage method is:
%
%      Image *InterpolativeResizeImage(const Image *image,const size_t columns,
%        const size_t rows,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Identity resize: hand back a clone untouched. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Ratio of source to destination extents, used to map pixel centers. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /* +0.5/-0.5 maps between pixel-center coordinate conventions. */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        /*
          NOTE(review): InterpolatePixelChannels() appears to be invoked once
          per channel index i with identical arguments — presumably redundant;
          verify against its implementation before changing.
        */
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,const size_t columns,
%        const size_t rows,const double delta_x,const double rigidity,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Tiny targets: seam carving degenerates, fall back to a normal resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  /* Pack the image into a float buffer in the layout liblqr expects. */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /* lqr_status is deliberately unchecked (best-effort); the (void) cast
     below silences unused-result warnings. */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /* Scan the carved result back into the destination image, pixel by
     pixel, converting floats back to Quantum. */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    Quantum
      *magick_restrict p;

    ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/* Stub when ImageMagick is built without the LQR delegate: reports a
   missing-delegate exception and returns no image. */
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Copy one multi-channel pixel from source[source_offset] to
   destination[destination_offset]. */
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
    destination[channels*destination_offset+i]=source[source_offset*channels+i];
}

/* Average source_size pixels (selected by source_offset[]) channel-wise into
   destination[destination_offset]. */
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    sum;

  ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
  {
    ssize_t
      j;

    sum=0;
    for (j=0; j < (ssize_t) source_size; j++)
      sum+=source[source_offset[j]*channels+i];
    destination[channels*destination_offset+i]=(Quantum) (sum/source_size);
  }
}

/* Convenience wrapper: average exactly two source pixels. */
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const ssize_t
    offsets[2] = { source_offset1, source_offset2 };
  MixPixels(source,offsets,2,destination,destination_offset,channels);
}

/* Return 1 when the two referenced pixels match on every channel. */
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  ssize_t
    i;

  offset1*=channels;
  offset2*=channels;
  for (i=0; i < (ssize_t) channels; i++)
    if (source1[offset1+i] != source2[offset2+i])
      return(0);
  return(1);
}

/*
  Eagle 2x pixel-art scaler: pixels appears to be a 3x3 neighborhood
  (indices 0..8, center 4 — inferred from the offsets used below); result
  receives the four output pixels of the doubled center.
*/
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    i;

  (void) source;
  /* Default: replicate the center pixel into all four outputs. */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  /* Replace a corner when its two adjacent neighbors agree. */
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);
}

/*
  Apply one hq2x blending rule (0..19+) for a single output pixel.
  e is the center offset; a,b,d,f,h are neighbor offsets.  caseA/caseB
  expand to 4- and 8-pixel weighted mixes respectively.
*/
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
 \
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
 \
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }

  switch (rule)
  {
    case 0:
    {
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      /* 14:1:1 weighting of center against d and b. */
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}

/* Fold an 8-entry 0/1 neighbor-difference pattern into an index
   (pattern[0] is the most significant bit). */
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  ssize_t
    i;

  unsigned int
    result,
    order;

  result=0;
  order=1;
  for (i=7; i >= 0; i--)
  {
    result+=order*pattern[i];
    order*=2;
  }
  return(result);
}

static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /* Rule lookup table indexed by the 8-bit neighbor-difference pattern. */
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 12, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19,  1, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6, 18, 5,  3, 16, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 13, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3,  1, 12, 5,  3,  1, 14
    };

  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
!PixelsEqual(pixels,4,pixels,2,channels), !PixelsEqual(pixels,4,pixels,1,channels), !PixelsEqual(pixels,4,pixels,0,channels) }; #define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5] const int pattern2[] = { Rotated(pattern1) }; const int pattern3[] = { Rotated(pattern2) }; const int pattern4[] = { Rotated(pattern3) }; #undef Rotated (void) source; Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0, channels,4,0,1,3,5,7); Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1, channels,4,2,5,1,7,3); Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3, channels,4,8,7,5,3,1); Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2, channels,4,6,3,7,1,5); } static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result, const size_t channels) { #define Corner(A,B,C,D) \ { \ if (intensities[B] > intensities[A]) \ { \ const ssize_t \ offsets[3] = { B, C, D }; \ \ MixPixels(pixels,offsets,3,result,3,channels); \ } \ else \ { \ const ssize_t \ offsets[3] = { A, B, C }; \ \ MixPixels(pixels,offsets,3,result,3,channels); \ } \ } #define Line(A,B,C,D) \ { \ if (intensities[C] > intensities[A]) \ Mix2Pixels(pixels,C,D,result,3,channels); \ else \ Mix2Pixels(pixels,A,B,result,3,channels); \ } const ssize_t pixels_offsets[4] = { 0, 1, 3, 4 }; MagickFloatType intensities[9]; int ae, bd, ab, ad, be, de; ssize_t i; for (i=0; i < 9; i++) intensities[i]=GetPixelIntensity(source,pixels + i*channels); CopyPixels(pixels,0,result,0,channels); CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result, 1,channels); CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ? 
0 : 3),result, 2,channels); ae=PixelsEqual(pixels,0,pixels,4,channels); bd=PixelsEqual(pixels,1,pixels,3,channels); ab=PixelsEqual(pixels,0,pixels,1,channels); de=PixelsEqual(pixels,3,pixels,4,channels); ad=PixelsEqual(pixels,0,pixels,3,channels); be=PixelsEqual(pixels,1,pixels,4,channels); if (ae && bd && ab) { CopyPixels(pixels,0,result,3,channels); return; } if (ad && de && !ab) { Corner(1,0,4,3) return; } if (be && de && !ab) { Corner(0,1,3,4) return; } if (ad && ab && !be) { Corner(4,3,1,0) return; } if (ab && be && !ad) { Corner(3,0,4,1) return; } if (ae && (!bd || intensities[1] > intensities[0])) { Mix2Pixels(pixels,0,4,result,3,channels); return; } if (bd && (!ae || intensities[0] > intensities[1])) { Mix2Pixels(pixels,1,3,result,3,channels); return; } if (ab) { Line(0,1,3,4) return; } if (de) { Line(3,4,0,1) return; } if (ad) { Line(0,3,1,4) return; } if (be) { Line(1,4,0,3) return; } MixPixels(pixels,pixels_offsets,4,result,3,channels); #undef Corner #undef Line } static void Xbr2X(const Image *magick_unused(source),const Quantum *pixels, Quantum *result,const size_t channels) { #define WeightVar(M,N) const int w_##M##_##N = \ PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1; WeightVar(12,11) WeightVar(12,7) WeightVar(12,13) WeightVar(12,17) WeightVar(12,16) WeightVar(12,8) WeightVar(6,10) WeightVar(6,2) WeightVar(11,7) WeightVar(11,17) WeightVar(11,5) WeightVar(7,13) WeightVar(7,1) WeightVar(12,6) WeightVar(12,18) WeightVar(8,14) WeightVar(8,2) WeightVar(13,17) WeightVar(13,9) WeightVar(7,3) WeightVar(16,10) WeightVar(16,22) WeightVar(17,21) WeightVar(11,15) WeightVar(18,14) WeightVar(18,22) WeightVar(17,23) WeightVar(17,19) #undef WeightVar magick_unreferenced(source); if ( w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) < w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6) ) Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 
11 : 7),12,result,0, channels); else CopyPixels(pixels,12,result,0,channels); if ( w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) < w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8) ) Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1, channels); else CopyPixels(pixels,12,result,1,channels); if ( w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) < w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16) ) Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2, channels); else CopyPixels(pixels,12,result,2,channels); if ( w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) < w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18) ) Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3, channels); else CopyPixels(pixels,12,result,3,channels); } static void Scale2X(const Image *magick_unused(source),const Quantum *pixels, Quantum *result,const size_t channels) { magick_unreferenced(source); if (PixelsEqual(pixels,1,pixels,7,channels) || PixelsEqual(pixels,3,pixels,5,channels)) { ssize_t i; for (i=0; i < 4; i++) CopyPixels(pixels,4,result,i,channels); return; } if (PixelsEqual(pixels,1,pixels,3,channels)) CopyPixels(pixels,3,result,0,channels); else CopyPixels(pixels,4,result,0,channels); if (PixelsEqual(pixels,1,pixels,5,channels)) CopyPixels(pixels,5,result,1,channels); else CopyPixels(pixels,4,result,1,channels); if (PixelsEqual(pixels,3,pixels,7,channels)) CopyPixels(pixels,3,result,2,channels); else CopyPixels(pixels,4,result,2,channels); if (PixelsEqual(pixels,5,pixels,7,channels)) CopyPixels(pixels,5,result,3,channels); else CopyPixels(pixels,4,result,3,channels); } static void Epbx2X(const Image *magick_unused(source),const Quantum *pixels, Quantum *result,const size_t channels) { #define HelperCond(a,b,c,d,e,f,g) ( \ PixelsEqual(pixels,a,pixels,b,channels) && ( \ PixelsEqual(pixels,c,pixels,d,channels) || \ PixelsEqual(pixels,c,pixels,e,channels) || \ PixelsEqual(pixels,a,pixels,f,channels) || \ 
PixelsEqual(pixels,b,pixels,g,channels) \ ) \ ) ssize_t i; magick_unreferenced(source); for (i=0; i < 4; i++) CopyPixels(pixels,4,result,i,channels); if ( !PixelsEqual(pixels,3,pixels,5,channels) && !PixelsEqual(pixels,1,pixels,7,channels) && ( PixelsEqual(pixels,4,pixels,3,channels) || PixelsEqual(pixels,4,pixels,7,channels) || PixelsEqual(pixels,4,pixels,5,channels) || PixelsEqual(pixels,4,pixels,1,channels) || ( ( !PixelsEqual(pixels,0,pixels,8,channels) || PixelsEqual(pixels,4,pixels,6,channels) || PixelsEqual(pixels,3,pixels,2,channels) ) && ( !PixelsEqual(pixels,6,pixels,2,channels) || PixelsEqual(pixels,4,pixels,0,channels) || PixelsEqual(pixels,4,pixels,8,channels) ) ) ) ) { if (HelperCond(1,3,4,0,8,2,6)) Mix2Pixels(pixels,1,3,result,0,channels); if (HelperCond(5,1,4,2,6,8,0)) Mix2Pixels(pixels,5,1,result,1,channels); if (HelperCond(3,7,4,6,2,0,8)) Mix2Pixels(pixels,3,7,result,2,channels); if (HelperCond(7,5,4,8,0,6,2)) Mix2Pixels(pixels,7,5,result,3,channels); } #undef HelperCond } static inline void Eagle3X(const Image *magick_unused(source), const Quantum *pixels,Quantum *result,const size_t channels) { ssize_t corner_tl, corner_tr, corner_bl, corner_br; magick_unreferenced(source); corner_tl=PixelsEqual(pixels,0,pixels,1,channels) && PixelsEqual(pixels,0,pixels,3,channels); corner_tr=PixelsEqual(pixels,1,pixels,2,channels) && PixelsEqual(pixels,2,pixels,5,channels); corner_bl=PixelsEqual(pixels,3,pixels,6,channels) && PixelsEqual(pixels,6,pixels,7,channels); corner_br=PixelsEqual(pixels,5,pixels,7,channels) && PixelsEqual(pixels,7,pixels,8,channels); CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels); if (corner_tl && corner_tr) Mix2Pixels(pixels,0,2,result,1,channels); else CopyPixels(pixels,4,result,1,channels); CopyPixels(pixels,(ssize_t) (corner_tr ? 
1 : 4),result,2,channels); if (corner_tl && corner_bl) Mix2Pixels(pixels,0,6,result,3,channels); else CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); if (corner_tr && corner_br) Mix2Pixels(pixels,2,8,result,5,channels); else CopyPixels(pixels,4,result,5,channels); CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels); if (corner_bl && corner_br) Mix2Pixels(pixels,6,8,result,7,channels); else CopyPixels(pixels,4,result,7,channels); CopyPixels(pixels,(ssize_t) (corner_br ? 5 : 4),result,8,channels); } static inline void Eagle3XB(const Image *magick_unused(source), const Quantum *pixels,Quantum *result,const size_t channels) { ssize_t corner_tl, corner_tr, corner_bl, corner_br; magick_unreferenced(source); corner_tl=PixelsEqual(pixels,0,pixels,1,channels) && PixelsEqual(pixels,0,pixels,3,channels); corner_tr=PixelsEqual(pixels,1,pixels,2,channels) && PixelsEqual(pixels,2,pixels,5,channels); corner_bl=PixelsEqual(pixels,3,pixels,6,channels) && PixelsEqual(pixels,6,pixels,7,channels); corner_br=PixelsEqual(pixels,5,pixels,7,channels) && PixelsEqual(pixels,7,pixels,8,channels); CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels); CopyPixels(pixels,4,result,1,channels); CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels); CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); CopyPixels(pixels,4,result,5,channels); CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels); CopyPixels(pixels,4,result,7,channels); CopyPixels(pixels,(ssize_t) (corner_br ? 
5 : 4),result,8,channels); } static inline void Scale3X(const Image *magick_unused(source), const Quantum *pixels,Quantum *result,const size_t channels) { magick_unreferenced(source); if (!PixelsEqual(pixels,1,pixels,7,channels) && !PixelsEqual(pixels,3,pixels,5,channels)) { if (PixelsEqual(pixels,3,pixels,1,channels)) CopyPixels(pixels,3,result,0,channels); else CopyPixels(pixels,4,result,0,channels); if ( ( PixelsEqual(pixels,3,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,2,channels) ) || ( PixelsEqual(pixels,5,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,0,channels) ) ) CopyPixels(pixels,1,result,1,channels); else CopyPixels(pixels,4,result,1,channels); if (PixelsEqual(pixels,5,pixels,1,channels)) CopyPixels(pixels,5,result,2,channels); else CopyPixels(pixels,4,result,2,channels); if ( ( PixelsEqual(pixels,3,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,6,channels) ) || ( PixelsEqual(pixels,3,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,0,channels) ) ) CopyPixels(pixels,3,result,3,channels); else CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); if ( ( PixelsEqual(pixels,5,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,8,channels) ) || ( PixelsEqual(pixels,5,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,2,channels) ) ) CopyPixels(pixels,5,result,5,channels); else CopyPixels(pixels,4,result,5,channels); if (PixelsEqual(pixels,3,pixels,7,channels)) CopyPixels(pixels,3,result,6,channels); else CopyPixels(pixels,4,result,6,channels); if ( ( PixelsEqual(pixels,3,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,8,channels) ) || ( PixelsEqual(pixels,5,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,6,channels) ) ) CopyPixels(pixels,7,result,7,channels); else CopyPixels(pixels,4,result,7,channels); if (PixelsEqual(pixels,5,pixels,7,channels)) CopyPixels(pixels,5,result,8,channels); else CopyPixels(pixels,4,result,8,channels); } else { ssize_t i; for (i=0; i < 9; i++) 
CopyPixels(pixels,4,result,i,channels); } } MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception) { #define MagnifyImageTag "Magnify/Image" CacheView *image_view, *magnify_view; const char *option; Image *source_image, *magnify_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo rectangle; ssize_t y; unsigned char magnification, width; void (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t); /* Initialize magnified image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); option=GetImageOption(image->image_info,"magnify:method"); if (option == (char *) NULL) option="scale2x"; scaling_method=Scale2X; magnification=1; width=1; switch (*option) { case 'e': { if (LocaleCompare(option,"eagle2x") == 0) { scaling_method=Eagle2X; magnification=2; width=3; break; } if (LocaleCompare(option,"eagle3x") == 0) { scaling_method=Eagle3X; magnification=3; width=3; break; } if (LocaleCompare(option,"eagle3xb") == 0) { scaling_method=Eagle3XB; magnification=3; width=3; break; } if (LocaleCompare(option,"epbx2x") == 0) { scaling_method=Epbx2X; magnification=2; width=3; break; } break; } case 'f': { if (LocaleCompare(option,"fish2x") == 0) { scaling_method=Fish2X; magnification=2; width=3; break; } break; } case 'h': { if (LocaleCompare(option,"hq2x") == 0) { scaling_method=Hq2X; magnification=2; width=3; break; } break; } case 's': { if (LocaleCompare(option,"scale2x") == 0) { scaling_method=Scale2X; magnification=2; width=3; break; } if (LocaleCompare(option,"scale3x") == 0) { scaling_method=Scale3X; magnification=3; width=3; break; } break; } case 'x': { if (LocaleCompare(option,"xbr2x") == 0) { scaling_method=Xbr2X; magnification=2; width=5; } break; 
} default: break; } /* Make a working copy of the source image and convert it to RGB colorspace. */ source_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (source_image == (Image *) NULL) return((Image *) NULL); offset.x=0; offset.y=0; rectangle.x=0; rectangle.y=0; rectangle.width=image->columns; rectangle.height=image->rows; (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception); (void) SetImageColorspace(source_image,RGBColorspace,exception); magnify_image=CloneImage(source_image,magnification*source_image->columns, magnification*source_image->rows,MagickTrue,exception); if (magnify_image == (Image *) NULL) { source_image=DestroyImage(source_image); return((Image *) NULL); } /* Magnify the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(source_image,exception); magnify_view=AcquireAuthenticCacheView(magnify_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,magnify_image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { Quantum r[128]; /* to hold result pixels */ Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y, magnify_image->columns,magnification,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } /* Magnify this row of pixels. */ for (x=0; x < (ssize_t) source_image->columns; x++) { const Quantum *magick_restrict p; size_t channels; ssize_t i; ssize_t j; p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width, exception); channels=GetPixelChannels(source_image); scaling_method(source_image,p,r,channels); /* Copy the result pixels into the final image. 
*/ for (j=0; j < (ssize_t) magnification; j++) for (i=0; i < (ssize_t) (channels*magnification); i++) q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i]; q+=magnification*GetPixelChannels(magnify_image); } if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } magnify_view=DestroyCacheView(magnify_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); if (status == MagickFalse) magnify_image=DestroyImage(magnify_image); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a convenience method that scales an image proportionally to % half its size. % % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception) { Image *minify_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter, exception); return(minify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResampleImage() resize image in terms of its pixel size, so that when % displayed at the given resolution it will be the same size in terms of % real world units as the original image at the original resolution. % % The format of the ResampleImage method is: % % Image *ResampleImage(Image *image,const double x_resolution, % const double y_resolution,const FilterType filter, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be resized to fit the given resolution. % % o x_resolution: the new image x resolution. % % o y_resolution: the new image y resolution. % % o filter: Image filter to use. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ResampleImage(const Image *image,const double x_resolution, const double y_resolution,const FilterType filter,ExceptionInfo *exception) { #define ResampleImageTag "Resample/Image" Image *resample_image; size_t height, width; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ? DefaultResolution : image->resolution.x)+0.5); height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ? DefaultResolution : image->resolution.y)+0.5); resample_image=ResizeImage(image,width,height,filter,exception); if (resample_image != (Image *) NULL) { resample_image->resolution.x=x_resolution; resample_image->resolution.y=y_resolution; } return(resample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResizeImage() scales an image to the desired dimensions, using the given % filter (see AcquireFilterInfo()). % % If an undefined filter is given the filter defaults to Mitchell for a % colormapped image, a image with a matte channel, or if the image is % enlarged. Otherwise the filter defaults to a Lanczos. % % ResizeImage() was inspired by Paul Heckbert's "zoom" program. % % The format of the ResizeImage method is: % % Image *ResizeImage(Image *image,const size_t columns,const size_t rows, % const FilterType filter,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o filter: Image filter to use. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  ContributionInfo: one filter tap -- the weight applied to the source pixel
  at index 'pixel' when accumulating a single output pixel.
*/
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  DestroyContributionThreadSet() releases each per-thread contribution array
  and then the pointer array itself; the return value is the result of
  RelinquishMagickMemory() (used as the failure return in
  AcquireContributionThreadSet() below).
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  AcquireContributionThreadSet() allocates one aligned array of 'count'
  ContributionInfo entries per worker thread (thread count taken from the
  thread resource limit), so each OpenMP worker gets a private scratch
  buffer.  On any allocation failure all partial allocations are released
  and the failure value of DestroyContributionThreadSet() is returned.
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /* zero the slots so a partial-failure cleanup can tell used from unused */
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/*
  HorizontalFilter() applies the resize filter along the x axis, producing
  resize_image (resize_image->columns wide) from image; 'span' and
  'progress' drive progress reporting across both filter passes.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ?
DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse) return(MagickFalse); if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->columns,1) #endif for (x=0; x < (ssize_t) resize_image->columns; x++) { const int id = GetOpenMPThreadId(); double bisect, density; const Quantum *magick_restrict p; ContributionInfo *magick_restrict contribution; Quantum *magick_restrict q; ssize_t y; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (x+0.5)/x_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) resize_image->rows; y++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j-start].pixel-contribution[0].pixel); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (j=0; j < n; j++) { k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j].pixel-contribution[0].pixel); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } /* Alpha blending. 
*/ gamma=0.0; for (j=0; j < n; j++) { k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j].pixel-contribution[0].pixel); alpha=contribution[j].weight*QuantumScale* GetPixelAlpha(image,p+k*GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif (*progress)++; proceed=SetImageProgress(image,ResizeImageTag,*progress,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } static MagickBooleanType VerticalFilter( const ResizeFilter *magick_restrict resize_filter, const Image *magick_restrict image,Image *magick_restrict resize_image, const double y_factor,const MagickSizeType span, MagickOffsetType *magick_restrict progress,ExceptionInfo *exception) { CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **magick_restrict contributions; double scale, support; MagickBooleanType status; ssize_t y; /* Apply filter to resize vertically from image to resize image. */ scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse) return(MagickFalse); if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. 
*/ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { const int id = GetOpenMPThreadId(); double bisect, density; const Quantum *magick_restrict p; ContributionInfo *magick_restrict contribution; Quantum *magick_restrict q; ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (y+0.5)/y_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) resize_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. 
*/ for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } gamma=0.0; for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k* GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif (*progress)++; proceed=SetImageProgress(image,ResizeImageTag,*progress,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterType filter,ExceptionInfo *exception) { double x_factor, y_factor; FilterType filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter)) return(CloneImage(image,0,0,MagickTrue,exception)); /* Acquire resize filter. */ x_factor=(double) columns/(double) image->columns; y_factor=(double) rows/(double) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->alpha_trait != UndefinedPixelTrait) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) resize_image=AccelerateResizeImage(image,columns,rows,resize_filter, exception); if (resize_image != (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } #endif resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(DestroyImage(resize_image)); } /* Resize image. 
*/ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; ssize_t x1; ssize_t *x_offset, y; PointInfo sample_offset; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,sample_image,sample_image->rows,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Sample each column. */ for (x=0; x < (ssize_t) sample_image->columns; x++) { ssize_t i; if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(sample_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++) { PixelChannel channel; PixelTrait image_traits, traits; channel=GetPixelChannelChannel(sample_image,i); traits=GetPixelChannelTraits(sample_image,channel); image_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (image_traits == UndefinedPixelTrait)) continue; SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels( image)+i],q); } q+=GetPixelChannels(sample_image); } if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if 
(status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" CacheView *image_view, *scale_view; double alpha, pixel[CompositePixelChannel], *scale_scanline, *scanline, *x_vector, *y_vector; Image *scale_image; MagickBooleanType next_column, next_row, proceed, status; PixelTrait scale_traits; PointInfo scale, span; ssize_t i; ssize_t n, number_rows, y; /* Initialize scaled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse) { scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. */ x_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*scanline)); scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns, MaxPixelChannels*sizeof(*scale_scanline)); y_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*y_vector)); if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) || (x_vector == (double *) NULL) || (y_vector == (double *) NULL)) { if ((image->rows != scale_image->rows) && (scanline != (double *) NULL)) scanline=(double *) RelinquishMagickMemory(scanline); if (scale_scanline != (double *) NULL) scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (x_vector != (double *) NULL) x_vector=(double *) RelinquishMagickMemory(x_vector); if (y_vector != (double *) NULL) y_vector=(double *) RelinquishMagickMemory(y_vector); scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. 
*/ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns* sizeof(*y_vector)); n=0; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); scale_view=AcquireAuthenticCacheView(scale_image,exception); for (y=0; y < (ssize_t) scale_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) break; q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } alpha=1.0; if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; } for (x=0; x < (ssize_t) image->columns; x++) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) y_vector[x*GetPixelChannels(image)+i]+=scale.y* x_vector[x*GetPixelChannels(image)+i]; span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; next_row=MagickFalse; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y* x_vector[x*GetPixelChannels(image)+i]; scanline[x*GetPixelChannels(image)+i]=pixel[i]; y_vector[x*GetPixelChannels(image)+i]=0.0; } } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. 
*/ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[ x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } else { ssize_t t; /* Scale X direction. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; span.x=1.0; t=0; for (x=0; x < (ssize_t) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i]; scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; } scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) 
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i]; span.x-=scale.x; } } if (span.x > 0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i]; } if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns)) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; /* Transfer scanline to scaled image. */ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scale_scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha* scale_scanline[x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse) { status=MagickFalse; break; } proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } scale_view=DestroyCacheView(scale_view); image_view=DestroyCacheView(image_view); /* Free allocated memory. 
*/ y_vector=(double *) RelinquishMagickMemory(y_vector); scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(double *) RelinquishMagickMemory(scanline); x_vector=(double *) RelinquishMagickMemory(x_vector); scale_image->type=image->type; if (status == MagickFalse) scale_image=DestroyImage(scale_image); return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *linear_image,
    *thumbnail_image;

  struct stat
    attributes;

  /*
    Produce a small, low-cost thumbnail: resize in linear RGB with a sharp
    filter, then strip profiles/comments and attach the freedesktop.org
    Thumb::* properties.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  if (linear_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageColorspace(linear_image,RGBColorspace,exception);
  linear_image->filter=LanczosSharpFilter;
  thumbnail_image=DistortResizeImage(linear_image,columns,rows,exception);
  linear_image=DestroyImage(linear_image);
  if (thumbnail_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set sRGB colorspace and remove color profiles and comments.
  */
  (void) SetImageColorspace(thumbnail_image,sRGBColorspace,exception);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Delete every profile except color management (icc/icm).  The profile
    iterator is invalidated by DeleteImageProfile(), so restart it after
    each deletion.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Set Thumb properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      /*
        Read attributes.st_mtime only when GetPathAttributes() succeeded;
        otherwise the struct stat is uninitialized and formatting it is
        undefined behavior.
      */
      (void) FormatImageProperty(thumbnail_image,"Thumb::MTime","%.20g",
        (double) attributes.st_mtime);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Width","%.20g",
    (double) image->magick_columns);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Height","%.20g",
    (double) image->magick_rows);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Document::Pages","%.20g",
    (double) GetImageListLength(image));
  return(thumbnail_image);
}
saxpy.c
/** * @file saxpy.c * * @mainpage saxpy * * @author Xin Wu (PC²) * @date 05.04.2020 * @copyright CC BY-SA 2.0 * * saxpy performs the \c saxpy operation on host as well as accelerator. * The performance (in MB/s) for different implementations is also compared. * * The \c saxpy operation is defined as: * * y := a * x + y * * where: * * - a is a scalar. * - x and y are single-precision vectors each with n elements. */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <omp.h> #include "hsaxpy.h" #include "asaxpy.h" #include "check1ns.h" #include "wtcalc.h" #define TWO26 (1 << 26) #define NLUP (32) /** * @brief Main entry point for saxpy. */ int main(int argc, char *argv[]) { int i, n, iret, ial; size_t nbytes; float a = 2.0f, *x, *y, *yhost, *yaccl, maxabserr; struct timespec rt[2]; double wt; // walltime /* * We need 1 ns time resolution. */ check1ns(); printf("The system supports 1 ns time resolution\n"); /* * check the number of accelerators */ if (0 == omp_get_num_devices()) { printf("No accelerator found ... 
exit\n"); exit(EXIT_FAILURE); } /* * preparation */ n = TWO26; nbytes = sizeof(float) * n; iret = 0; if (NULL == (x = (float *) malloc(nbytes))) iret = -1; if (NULL == (y = (float *) malloc(nbytes))) iret = -1; if (NULL == (yhost = (float *) malloc(nbytes))) iret = -1; if (NULL == (yaccl = (float *) malloc(nbytes))) iret = -1; if (0 != iret) { printf("error: memory allocation\n"); free(x); free(y); free(yhost); free(yaccl); exit(EXIT_FAILURE); } #pragma omp parallel for default(none) \ shared(a, x, y, yhost, yaccl, n) private(i) for (i = 0; i < n; ++i) { x[i] = rand() % 32 / 32.0f; y[i] = rand() % 32 / 32.0f; yhost[i] = a * x[i] + y[i]; // yhost will be used as reference value yaccl[i] = 0.0f; } printf("total size of x and y is %9.1f MB\n", 2.0 * nbytes / (1 << 20)); printf("tests are averaged over %2d loops\n", NLUP); /* * saxpy on host */ /* * See hsaxpy.c for details: */ memcpy(yaccl, y, nbytes); wtcalc = -1.0; // skip 1st run for timing hsaxpy(n, a, x, yaccl); // check yaccl maxabserr = -1.0f; for (i = 0; i < n; ++i) { maxabserr = fabsf(yaccl[i] - yhost[i]) > maxabserr? fabsf(yaccl[i] - yhost[i]) : maxabserr; } // skip 2nd run for timing hsaxpy(n, a, x, yaccl); // timing : start wtcalc = 0.0; clock_gettime(CLOCK_REALTIME, rt + 0); for (int ilup = 0; ilup < 1; ++ilup) { hsaxpy(n, a, x, yaccl); } clock_gettime(CLOCK_REALTIME, rt + 1); wt=(rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on host: %9.1f MB/s %9.1f MB/s maxabserr = %9.1f\n", 3.0 * nbytes / ((1 << 20) * wt), 3.0 * nbytes / ((1 << 20) * wtcalc), maxabserr); /* * saxpy on accl */ for (ial = 0; ial < 6; ++ial) { /* * See asaxpy.c for details: * * ial: * * 0: <<<2^7 , 2^7 >>>, auto scheduling * 1: <<<2^16, 2^10>>>, manual scheduling * 2: <<<2^15, 2^7 >>>, manual scheduling, 16x loop unrolling (2^15*2^7*16==2^26) * 3: <<<2^12, 2^7 >>>, auto scheduling, 16x loop unrolling * 4: de-linearize the vector and then collapse the ji-loop. 
* otherwise: hipblasSaxpy in HIPBLAS */ memcpy(yaccl, y, nbytes); wtcalc = -1.0; // skip 1st run for timing asaxpy(n, a, x, yaccl, ial); // check yaccl maxabserr = -1.0f; for (i = 0; i < n; ++i) { maxabserr = fabsf(yaccl[i] - yhost[i]) > maxabserr? fabsf(yaccl[i] - yhost[i]) : maxabserr; } // skip 2nd run for timing asaxpy(n, a, x, yaccl, ial); // timing : start wtcalc = 0.0; clock_gettime(CLOCK_REALTIME, rt + 0); for (int ilup = 0; ilup < NLUP; ++ilup) { asaxpy(n, a, x, yaccl, ial); } clock_gettime(CLOCK_REALTIME, rt + 1); wt=(rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on accl (impl. %d)\ntotal: %9.1f MB/s kernel: %9.1f MB/s maxabserr = %9.1f\n\n", ial, NLUP * 3.0 * nbytes / ((1 << 20) * wt), NLUP * 3.0 * nbytes / ((1 << 20) * wtcalc), maxabserr); } /* * release memory */ free(x); free(y); free(yhost); free(yaccl); return 0; }
selu_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "selu_kernel_arm.h"

#include "neon_mathfun.h"

#include <math.h>

#include <arm_neon.h>

/*
 * SELU activation over one chunk of `step` floats:
 *
 *   out = lambda * x                       for x >  0
 *   out = lambda * alpha * (exp(x) - 1)    for x <= 0
 *
 * `data` points to an int holding the chunk length `step`; `id` selects
 * which chunk of the input/output buffers to process.  The first parameter
 * `i` is unused (kept for the common kernel signature).
 */
void selu_kernel(int i, int id, void* data, const float* input, float* output, float alpha, float lambda)
{
    float alpha_lambda = alpha * lambda;
    int step = ((int*)data)[0];

    float32x4_t _one = vdupq_n_f32(1.f);
    float32x4_t _zero = vdupq_n_f32(0.f);
    float32x4_t _alpha_lambda = vdupq_n_f32(alpha_lambda);
    float32x4_t _lambda = vdupq_n_f32(lambda);

    const float* cur_input = input + id * step;
    float* cur_output = output + id * step;

    /* NEON main loop: 4 floats per iteration (step rounded down to x4) */
    for (int j = 0; j < (step & -4); j += 4)
    {
        float32x4_t _p = vld1q_f32(cur_input);
        /* lane mask: x <= 0 selects the exponential branch */
        uint32x4_t _lemask = vcleq_f32(_p, _zero);
        float32x4_t _nps = exp_ps(_p);
        _nps = vsubq_f32(_nps, _one);
        _nps = vmulq_f32(_nps, _alpha_lambda);
        _p = vmulq_f32(_p, _lambda);
        _p = vbslq_f32(_lemask, _nps, _p);
        vst1q_f32(cur_output, _p);
        cur_input += 4;
        cur_output += 4;
    }
    /*
     * Scalar tail for the remaining (step % 4) elements.  Use expf(), not
     * exp(): the whole path is single precision and exp() would silently
     * promote to double.  The predicate is x <= 0, matching the NEON mask
     * above (at x == 0 both branches yield 0, so results are identical).
     */
    for (int j = step & ~3; j < step; j++)
    {
        if (cur_input[0] <= 0.f)
            cur_output[0] = (expf(cur_input[0]) - 1.f) * alpha_lambda;
        else
            cur_output[0] = cur_input[0] * lambda;
        cur_input++;
        cur_output++;
    }
}

/*
 * Apply SELU to a whole NCHW tensor, one channel per OpenMP task.
 *
 * output_tensor/input_tensor: same dims; dims[0]*dims[1] channels of
 * dims[2]*dims[3] elements each.  Always returns 0.
 */
int selu_run(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param, int num_thread)
{
    float* data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;
    float alpha = selu_param->alpha;
    float lambda = selu_param->lambda;

    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

    /* channels are independent, so each is processed as chunk 0 of its
       own offset region */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        int offset = i * chan_size;
        selu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha, lambda);
    }

    return 0;
}
matvec.h
#ifndef __MATVEC_H__ #define __MATVEC_H__ #include <complex> #include <algorithm> #include "numpy/ndarraytypes.h" #include "openmp.h" #if defined(_OPENMP) #include "csrmv_merge.h" template<typename I, typename T1,typename T2> void inline csr_matvec_contig(const bool overwrite_y, const I n, const I Ap[], const I Aj[], const T1 Ax[], const T1 a, const T2 x[], I rco[], T2 vco[], T2 y[]) { csrmv_merge(overwrite_y,n,Ap,Aj,Ax,a,x,rco,vco,y); } template<typename I, typename T1,typename T2> void inline csr_matvec_strided(const bool overwrite_y, const I n, const I Ap[], const I Aj[], const T1 Ax[], const T1 a, const npy_intp x_stride, const T2 x[], I rco[], T2 vco[], const npy_intp y_stride, T2 y[]) { csrmv_merge_strided(overwrite_y,n,Ap,Aj,Ax,a,x_stride,x,rco,vco,y_stride,y); } template <typename I, typename T1, typename T2> void dia_matvec_contig(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const T2 x[], T2 y[]) { if(overwrite_y){ #pragma omp for schedule(static) for(I n=0;n<n_row;n++){ y[n] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + i*L + j_start; const T2 * x_row = x + j_start; T2 * y_row = y + i_start; #pragma omp for schedule(static) for(I n=0;n<N;n++){ y_row[n] += (T2)(a * diag[n]) * x_row[n]; } } } template <typename I, typename T1, typename T2> void dia_matvec_strided(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { if(overwrite_y){ #pragma omp for schedule(static) for(I n=0;n<n_row;n++){ y[n * y_stride] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; 
//diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + i*L + j_start; const T2 * x_row = x + j_start * x_stride; T2 * y_row = y + i_start * y_stride; #pragma omp for schedule(static) for(I n=0;n<N;n++){ y_row[n * y_stride] += (T2)(a * diag[n]) * x_row[n * x_stride]; } } } template<typename I, typename T1,typename T2> void csc_matvec_contig(const bool overwrite_y, const I n_row, const I n_col, const I Ap[], const I Ai[], const T1 Ax[], const T1 a, const T2 x[], T2 y[]) { const int nthread = omp_get_num_threads(); const I chunk = std::max((I)1,n_row/(100*nthread)); if(overwrite_y){ #pragma omp for schedule(static) for(I j = 0; j < n_row; j++){ y[j] = 0; } } #pragma omp for schedule(dynamic,chunk) for(I j = 0; j < n_col; j++){ I col_start = Ap[j]; I col_end = Ap[j+1]; for(I ii = col_start; ii < col_end; ii++){ const I i = Ai[ii]; const T2 aa = (T2)(a * Ax[ii]) * x[j]; atomic_add(y[i],aa); } } } template<typename I, typename T1,typename T2> void csc_matvec_strided(const bool overwrite_y, const I n_row, const I n_col, const I Ap[], const I Ai[], const T1 Ax[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { const int nthread = omp_get_num_threads(); const I chunk = std::max((I)1,n_row/(100*nthread)); if(overwrite_y){ #pragma omp for schedule(static) for(I j = 0; j < n_row; j++){ y[j * y_stride] = 0; } } #pragma omp for schedule(dynamic,chunk) for(I j = 0; j < n_col; j++){ I col_start = Ap[j]; I col_end = Ap[j+1]; for(I ii = col_start; ii < col_end; ii++){ const I i = Ai[ii]; const T2 aa = (T2)(a * Ax[ii]) * x[j * x_stride]; atomic_add(y[i * y_stride],aa); } } } #else template<typename I, typename T1,typename T2> void csr_matvec_contig(const bool overwrite_y, const I n, const I Ap[], const I Aj[], const T1 Ax[], const T1 a, const T2 x[], I rco[], T2 
vco[], T2 y[]) { const T2 a_cast = a; if(overwrite_y){ for(I k = 0; k<n; k++){ T2 sum = 0; for(I jj = Ap[k]; jj < Ap[k+1]; jj++){ sum += (T2)Ax[jj] * x[Aj[jj]]; } y[k] = a_cast * sum; } }else{ for(I k = 0; k<n; k++){ T2 sum = 0; for(I jj = Ap[k]; jj < Ap[k+1]; jj++){ sum += (T2)Ax[jj] * x[Aj[jj]]; } y[k] += a_cast * sum; } } } template<typename I, typename T1,typename T2> void csr_matvec_strided(const bool overwrite_y, const I n, const I Ap[], const I Aj[], const T1 Ax[], const T1 a, const npy_intp x_stride, const T2 x[], I rco[], T2 vco[], const npy_intp y_stride, T2 y[]) { const T2 a_cast = a; if(overwrite_y){ for(I k = 0; k<n; k++){ T2 sum = 0; for(I jj = Ap[k]; jj < Ap[k+1]; jj++){ sum += (T2)Ax[jj] * x[Aj[jj] * x_stride]; } y[k * y_stride] = a_cast * sum; } }else{ for(I k = 0; k<n; k++){ T2 sum = 0; for(I jj = Ap[k]; jj < Ap[k+1]; jj++){ sum += (T2)Ax[jj] * x[Aj[jj] * x_stride]; } y[k * y_stride] += a_cast * sum; } } } template <typename I, typename T1, typename T2> void dia_matvec_contig(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const T2 x[], T2 y[]) { if(overwrite_y){ for(I i = 0; i < n_row; i++){ y[i] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + (npy_intp)i*L + j_start; const T2 * x_row = x + j_start; T2 * y_row = y + i_start; for(I n = 0; n < N; n++){ y_row[n] += (T2)(a * diag[n]) * x_row[n]; } } } template <typename I, typename T1, typename T2> void dia_matvec_strided(const bool overwrite_y, const I n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { if(overwrite_y){ for(I i = 0; i < n_row; 
i++){ y[i] = 0; } } for(I i = 0; i < n_diags; i++){ const I k = offsets[i]; //diagonal offset const I i_start = std::max<I>(0,-k); const I j_start = std::max<I>(0, k); const I j_end = std::min<I>(std::min<I>(n_row + k, n_col),L); const I N = j_end - j_start; //number of elements to process const T1 * diag = diags + (npy_intp)i*L + j_start; const T2 * x_row = x + j_start * x_stride; T2 * y_row = y + i_start * y_stride; for(I n = 0; n < N; n++){ y_row[n * y_stride] += (T2)(a * diag[n]) * x_row[n * x_stride]; } } } template<typename I, typename T1,typename T2> void csc_matvec_contig(const bool overwrite_y, const I n_row, const I n_col, const I Ap[], const I Ai[], const T1 Ax[], const T1 a, const T2 x[], T2 y[]) { if(overwrite_y){ for(I j = 0; j < n_row; j++){ y[j] = 0; } } for(I j = 0; j < n_col; j++){ I col_start = Ap[j]; I col_end = Ap[j+1]; for(I ii = col_start; ii < col_end; ii++){ const I i = Ai[ii]; y[i] += (T2)(a * Ax[ii]) * x[j]; } } } template<typename I, typename T1,typename T2> void csc_matvec_strided(const bool overwrite_y, const I n_row, const I n_col, const I Ap[], const I Ai[], const T1 Ax[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { if(overwrite_y){ for(I j = 0; j < n_row; j++){ y[j] = 0; } } for(I j = 0; j < n_col; j++){ I col_start = Ap[j]; I col_end = Ap[j+1]; for(I ii = col_start; ii < col_end; ii++){ const I i = Ai[ii]; y[i * y_stride] += (T2)(a * Ax[ii]) * x[j * x_stride]; } } } #endif template<typename I, typename T1,typename T2> void csr_matvec(const bool overwrite_y, const I n, const I Ap[], const I Aj[], const T1 Ax[], const T1 a, const npy_intp x_stride, const T2 x[], I rco[], T2 vco[], const npy_intp y_stride, T2 y[]) { if(y_stride == 1 && x_stride == 1){ csr_matvec_contig(overwrite_y,n,Ap,Aj,Ax,a,x,rco,vco,y); } else{ csr_matvec_strided(overwrite_y,n,Ap,Aj,Ax,a,x_stride,x,rco,vco,y_stride,y); } } template <typename I, typename T1, typename T2> void dia_matvec(const bool overwrite_y, const I 
n_row, const I n_col, const I n_diags, const I L, const I offsets[], const T1 diags[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { if(y_stride == 1 && x_stride == 1){ dia_matvec_contig(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x,y); } else{ dia_matvec_strided(overwrite_y,n_row,n_col,n_diags,L,offsets,diags,a,x_stride,x,y_stride,y); } } template<typename I, typename T1,typename T2> void csc_matvec(const bool overwrite_y, const I n_row, const I n_col, const I Ap[], const I Ai[], const T1 Ax[], const T1 a, const npy_intp x_stride, const T2 x[], const npy_intp y_stride, T2 y[]) { if(y_stride == 1 && x_stride == 1){ csc_matvec_contig(overwrite_y,n_row,n_col,Ap,Ai,Ax,a,x,y); } else{ csc_matvec_strided(overwrite_y,n_row,n_col,Ap,Ai,Ax,a,x_stride,x,y_stride,y); } } #endif
cwa_smd_opt.h
#ifndef METHODS_CWA_SMD_OPT_H #define METHODS_CWA_SMD_OPT_H #include <gsl/gsl_multimin.h> #include "quartz_internal/util/gsl_converter.h" namespace method { namespace cwa_smd_opt { namespace details { inline gsl_multimin_fdfminimizer_type * minimizer_map(const std::string type) { if(type == "steepest_descent") { return const_cast<gsl_multimin_fdfminimizer_type *>(gsl_multimin_fdfminimizer_steepest_descent); } else if(type == "conjugate_pr") { return const_cast<gsl_multimin_fdfminimizer_type *>(gsl_multimin_fdfminimizer_conjugate_pr); } else if(type == "conjugate_fr") { return const_cast<gsl_multimin_fdfminimizer_type *>(gsl_multimin_fdfminimizer_conjugate_fr); } else if(type == "bfgs") { return const_cast<gsl_multimin_fdfminimizer_type *>(gsl_multimin_fdfminimizer_vector_bfgs); } else if(type == "bfgs2") { return const_cast<gsl_multimin_fdfminimizer_type *>(gsl_multimin_fdfminimizer_vector_bfgs2); } else { throw Error("minimizer " + type + " is not implemented"); } } struct cwa_smd_opt_param { arma::mat original_points; arma::vec expectations_ref; std::vector<math::Polynomial<double>> original_operators; arma::vec weights; arma::vec scaling; long long grade; }; inline double penalty_function( const arma::mat & points, const arma::vec & expectations_ref, const std::vector<math::Polynomial<double>> & original_operators, const arma::vec & weights, const arma::vec & scaling, const long long grade) { double result = 0; for (arma::uword i = 0; i < original_operators.size(); i++) { const long long the_grade = original_operators[i].grade(); if (the_grade < grade && the_grade > 0) { const double result_from_cwa = cwa_smd::details::expectation(original_operators[i], points, weights, scaling); result += std::pow(result_from_cwa - expectations_ref(i), 2); } } return result; } inline arma::mat penalty_function_derivative( const arma::mat & points, const arma::vec & expectations_ref, const std::vector<math::Polynomial<double>> & original_operators, const arma::vec & weights, 
const arma::vec & scaling, const long long grade ) { arma::mat result(arma::size(points), arma::fill::zeros); for (arma::uword i = 0; i < original_operators.size(); i++) { const long long the_grade = original_operators[i].grade(); if (the_grade < grade && the_grade > 0) { const double result_from_cwa = cwa_smd::details::expectation(original_operators[i], points, weights, scaling); for (arma::uword j = 0; j < points.n_cols; j++) { const arma::vec point = arma::diagmat(1.0 / scaling) * points.col(j); for (arma::uword k = 0; k < points.n_rows / 2; k++) { const math::Polynomial<double> x_derivative = original_operators[i].derivative( k) / scaling(k); const math::Polynomial<double> p_derivative = original_operators[i].derivative( k + points.n_rows / 2) / scaling(k + points.n_rows / 2); result(k, j) -= 2.0 * (result_from_cwa - expectations_ref(i)) * weights(j) * x_derivative.at(point) / arma::sum(weights); result(k + points.n_rows / 2, j) -= 2.0 * (result_from_cwa - expectations_ref(i)) * weights(j) * p_derivative.at(point) / arma::sum(weights); } } } } return -result; } inline double penalty_function_gsl_wrapper(const gsl_vector * flattened_points, void * param) { const arma::vec arma_flattened_points = gsl::convert_vec(flattened_points); const auto converted_param = *(cwa_smd_opt_param *) param; const arma::mat points = arma::reshape(arma_flattened_points, arma::size( converted_param.original_points)); return penalty_function(points, converted_param.expectations_ref, converted_param.original_operators, converted_param.weights, converted_param.scaling, converted_param.grade); } inline void penalty_function_derivative_gsl_wrapper( const gsl_vector * flattened_points, void * param, gsl_vector * g) { const arma::vec arma_flattened_points = gsl::convert_vec(flattened_points); const auto converted_param = *(cwa_smd_opt_param *) param; const arma::mat points = arma::reshape(arma_flattened_points, arma::size( converted_param.original_points)); const arma::vec result = 
arma::vectorise( penalty_function_derivative(points, converted_param.expectations_ref, converted_param.original_operators, converted_param.weights, converted_param.scaling, converted_param.grade)); const auto result_pointer = gsl::convert_vec(result); gsl_vector_memcpy(g, result_pointer); gsl_vector_free(result_pointer); } inline void penalty_function_fdf_gsl_wrapper( const gsl_vector * a_derivatives, void * param, double * f, gsl_vector * g) { penalty_function_derivative_gsl_wrapper(a_derivatives, param, g); *f = penalty_function_gsl_wrapper(a_derivatives, param); } inline std::tuple<arma::mat, double, double, int> cwa_optimize(const cwa_smd_opt_param input, const double initial_step_size, const double tolerance, const double gradient_tolerance, const size_t total_steps, const std::string type) { /* allocate memory for minimization process */ const auto minimizer_type = minimizer_map(type); const arma::uword n = input.original_points.n_elem; const auto penalty_function_value = penalty_function(input.original_points, input.expectations_ref, input.original_operators, input.weights, input.scaling, input.grade); const double gradient_module = arma::norm( arma::vectorise(penalty_function_derivative(input.original_points, input.expectations_ref, input.original_operators, input.weights, input.scaling, input.grade))); if (penalty_function_value < tolerance && gradient_module < gradient_tolerance) { return {input.original_points, penalty_function_value, gradient_module, 0}; } auto minimizer_environment = gsl_multimin_fdfminimizer_alloc(minimizer_type, n); /* assigning function to minimizer object */ gsl_multimin_function_fdf minimizer_object; minimizer_object.f = &penalty_function_gsl_wrapper; minimizer_object.df = &penalty_function_derivative_gsl_wrapper; minimizer_object.fdf = &penalty_function_fdf_gsl_wrapper; minimizer_object.n = n; minimizer_object.params = (void *) &input; /* starting point */ const arma::vec flattened = arma::vectorise(input.original_points); 
gsl_vector * points = gsl::convert_vec(flattened); /* set environment */ gsl_multimin_fdfminimizer_set(minimizer_environment, &minimizer_object, points, initial_step_size, tolerance); size_t iter = 0; int status = GSL_CONTINUE; do { iter++; status = gsl_multimin_fdfminimizer_iterate(minimizer_environment); if (status) { throw Error(gsl_strerror(status)); } status = gsl_multimin_test_gradient(minimizer_environment->gradient, gradient_tolerance); if (status == GSL_SUCCESS) { const arma::vec result = gsl::convert_vec(minimizer_environment->x); const double f = minimizer_environment->f; const double df = arma::norm(gsl::convert_vec(minimizer_environment->gradient)); gsl_multimin_fdfminimizer_free(minimizer_environment); gsl_vector_free(points); return {arma::reshape(result, arma::size(input.original_points)), f, df, iter}; } } while (status == GSL_CONTINUE && iter < total_steps); throw Error("fail to converge towards the solution"); } } // namespace details struct State { public: arma::mat points; arma::vec weights; arma::vec masses; arma::uword grade; arma::uvec expectation_table; arma::vec expectations; arma::uvec positional_indices; arma::uvec momentum_indices; arma::vec scaling; // Establish an easy way to construct your State template<typename PhaseSpaceDistribution> State(const PhaseSpaceDistribution & initial, const arma::uvec & grid, const arma::mat & range, const arma::vec & scaling, const arma::vec & masses, const arma::uword grade) : points(math::space::points_generate(grid, range)), weights(arma::real(at(initial, points))), masses(masses), grade(grade), expectation_table(math::space::grids_to_table( grade * arma::ones<arma::uvec>(points.n_rows))), scaling(scaling) { if (grid.n_rows != range.n_rows) { throw Error("Different dimension between the grid and the range"); } if (grid.n_rows != 2 * masses.n_rows) { throw Error("Different dimension between the grid and the masses"); } const arma::uword dimension = grid.n_elem; const arma::uword length = 
std::pow(grade, dimension); this->expectations = arma::vec(length); this->positional_indices = arma::uvec(dimension / 2); this->momentum_indices = arma::uvec(dimension / 2); const arma::vec ranges = range.col(1) - range.col(0); // exponents check in #pragma omp parallel for for (arma::uword i = 0; i < dimension / 2; i++) { arma::uvec X = arma::zeros<arma::uvec>(dimension); arma::uvec P = arma::zeros<arma::uvec>(dimension); X(i) = 1; P(i + dimension / 2) = 1; this->positional_indices(i) = math::space::indices_to_index(X, this->expectation_table); this->momentum_indices(i) = math::space::indices_to_index(P, this->expectation_table); } // expectations check in #pragma omp parallel for for (arma::uword i = 0; i < length; i++) { const lvec indices = arma::conv_to<lvec>::from( math::space::index_to_indices(i, this->expectation_table)); this->expectations(i) = cwa_smd::details::expectation(math::polynomial::Term(1.0, indices), this->points, this->weights, this->scaling); } } template<typename PhaseSpaceDistribution> State(const PhaseSpaceDistribution & initial, const arma::uvec & grid, const arma::mat & range, const arma::uword grade) : points(math::space::points_generate(grid, range)), weights(arma::real(at(initial, points))), masses(arma::ones<arma::vec>(grid.n_rows / 2)), grade(grade), expectation_table(math::space::grids_to_table( grade * arma::ones<arma::uvec>(points.n_rows))) { if (grid.n_rows != range.n_rows) { throw Error("Different dimension between the grid and the range"); } if (grid.n_rows != 2 * masses.n_rows) { throw Error("Different dimension between the grid and the masses"); } const auto dimension = grid.n_elem; const auto length = std::pow(grade, dimension); this->expectations = arma::vec(length); this->positional_indices = arma::uvec(dimension / 2); this->momentum_indices = arma::uvec(dimension / 2); const arma::vec ranges = range.col(1) - range.col(0); this->scaling = ranges; // this->scaling = arma::ones(arma::size(ranges)); // exponents check in for 
(arma::uword i = 0; i < dimension / 2; i++) { arma::uvec X = arma::zeros<arma::uvec>(dimension); arma::uvec P = arma::zeros<arma::uvec>(dimension); X(i) = 1; P(i + dimension / 2) = 1; this->positional_indices(i) = math::space::indices_to_index(X, this->expectation_table); this->momentum_indices(i) = math::space::indices_to_index(P, this->expectation_table); } // expectations check in for (arma::uword i = 0; i < length; i++) { const lvec indices = arma::conv_to<lvec>::from( math::space::index_to_indices(i, this->expectation_table)); this->expectations(i) = cwa_smd::details::expectation( math::polynomial::Term(1.0, indices), this->points, this->weights, this->scaling); } } inline State(const arma::mat & points, const arma::vec & weights, const arma::vec & masses, const arma::uvec & expectation_table, const arma::vec & expectations, const arma::uvec & positional_indices, const arma::uvec & momentum_indices, const arma::vec & scaling, const arma::uword grade) : points(points), weights(weights), masses(masses), grade(grade), expectation_table(expectation_table), expectations(expectations), positional_indices(positional_indices), momentum_indices(momentum_indices), scaling(scaling) {} inline State(const State & state) : points(state.points), weights(state.weights), masses(state.masses), grade(state.grade), expectation_table(state.expectation_table), expectations(state.expectations), positional_indices(state.positional_indices), momentum_indices(state.momentum_indices), scaling(state.scaling) {} inline arma::uword dim() const { return points.n_rows / 2; } inline State normalise() const { State state = *this; state.weights = state.weights / arma::sum(state.weights); return state; } inline arma::vec positional_expectation() const { return method::cwa::State(this->points, this->weights, this->masses) .positional_expectation(); } inline arma::vec momentum_expectation() const { return method::cwa::State(this->points, this->weights, this->masses) .momentum_expectation(); } 
State operator+(const State & B) const { if (!arma::approx_equal(this->weights, B.weights, "abs_diff", 1e-16) || !arma::approx_equal(this->masses, B.masses, "abs_diff", 1e-16)) { throw Error("Different cwa states are being added"); } State state = B; state.points += this->points; state.expectations += this->expectations; return state; } State operator*(const double B) const { State state = *this; state.expectations *= B; state.points *= B; return state; } template<typename T> auto expectation(const math::Polynomial<T> & polynomial) const { return cwa_smd::details::at_search(polynomial, this->points, this->weights, this->expectations, this->expectation_table, this->scaling, this->grade) * polynomial.at(this->scaling); } template<typename T> arma::vec expectation(const std::vector<math::Polynomial<T>> & polynomials) const { arma::vec result(polynomials.size()); #pragma omp parallel for for (arma::uword i = 0; i < result.n_elem; i++) { result(i) = this->expectation(polynomials[i]); } return result; } State & operator=(const State &) = default; }; struct Operator { public: math::Polynomial<double> potential; math::Polynomial<double> H; std::vector<math::Polynomial<double>> original_operators; std::vector<math::Polynomial<double>> operators; Operator(const State & state, const math::Polynomial<double> & potential) : potential(potential), H(hamiltonian(potential, state.masses).scale(state.scaling)), operators() { std::vector<math::Polynomial<double>> op(std::pow(state.grade, state.dim() * 2)); std::vector<math::Polynomial<double>> original_op(std::pow(state.grade, state.dim() * 2)); op[0] = math::Polynomial<double>(state.dim() * 2, 0.0); original_op[0] = math::Polynomial<double>(state.dim() * 2, 1.0); for (arma::uword i = 1; i < op.size(); i++) { const auto observable = math::Polynomial(math::polynomial::Term<double>(1.0, math::space::index_to_indices( i, state.expectation_table))); original_op[i] = observable; const arma::uword cut_off = std::min(observable.grade(), 
H.grade()) / 2; const auto moyal = moyal_bracket(math::Polynomial(observable), H, state.scaling, cut_off); op[i] = moyal; } this->operators = op; this->original_operators = original_op; } inline PropagationType propagation_type() const { return Classic; } State operator()(const State & state) const { arma::mat p_submatrix = state.points.rows(state.dim(), 2 * state.dim() - 1); p_submatrix.each_col() /= state.masses; const arma::mat points_change_list = arma::join_cols(p_submatrix, cwa::details::force(this->potential, state.points.rows(0, state.dim() - 1))); arma::vec expectation_change_list = arma::vec(arma::size(state.expectations)); #pragma omp parallel for for (arma::uword i = 0; i < expectation_change_list.n_elem; i++) { expectation_change_list(i) = cwa_smd::details::at_search(this->operators[i], state.points, state.weights, state.expectations, state.expectation_table, state.scaling, state.grade); } return State(points_change_list, state.weights, state.masses, state.expectation_table, expectation_change_list, state.positional_indices, state.momentum_indices, state.scaling, state.grade); } }; template<typename Potential> OperatorWrapper<Operator, State, Potential> cwa_opt(const double initial_step_size, const double tolerance, const double gradient_tolerance, const size_t total_steps, const std::string type = "bfgs2", const int print_level = 0) { return [initial_step_size, tolerance, gradient_tolerance, total_steps, type, print_level ](const Operator & cwa_smd_opt_operator, const Potential & potential) -> Propagator<State> { return [initial_step_size, tolerance, gradient_tolerance, total_steps, &cwa_smd_opt_operator, type, print_level ] (const State & state, const double dt) -> State { const arma::vec & ref_expectations = state.expectations; const arma::mat & points = state.points; const auto & original_operators = cwa_smd_opt_operator.original_operators; details::cwa_smd_opt_param input{points, ref_expectations, original_operators, state.weights, state.scaling, 
(long long) state.grade}; const auto opt_result = details::cwa_optimize(input, initial_step_size, tolerance, gradient_tolerance, total_steps, type); const arma::mat new_points = std::get<0>(opt_result); const double f = std::get<1>(opt_result); const double df = std::get<2>(opt_result); const int iter = std::get<3>(opt_result); if(print_level > 2) { fmt::print("f: {0:20.10f}, df: {1:20.10f}, iter: {2}", f, df, iter); fmt::print("\n"); } State new_state = state; new_state.points = new_points; return new_state; }; }; } } // namespace cwa } #endif //METHODS_CWA_SMD_OPT_H
declare_variant_device_isa_codegen_1.c
// RUN: %clang_cc1 -verify -fopenmp -x c -triple %itanium_abi_triple -emit-llvm %s -o - -fopenmp-version=45 | FileCheck %s --check-prefix=GENERIC
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix=GENERIC

// RUN: %clang_cc1 -target-feature +avx512f -verify -fopenmp -x c -triple %itanium_abi_triple -emit-llvm %s -o - -fopenmp-version=45 | FileCheck %s --check-prefix=WITHFEATURE
// RUN: %clang_cc1 -target-feature +avx512f -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t -fopenmp-version=45 %s
// RUN: %clang_cc1 -target-feature +avx512f -fopenmp -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - -fopenmp-version=45 | FileCheck %s --check-prefix=WITHFEATURE

// RUN: %clang_cc1 -verify -fopenmp -x c -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s --check-prefix=GENERIC
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=GENERIC

// RUN: %clang_cc1 -target-feature +avx512f -verify -fopenmp -x c -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s --check-prefix=WITHFEATURE
// RUN: %clang_cc1 -target-feature +avx512f -fopenmp -x c++ -std=c++11 -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -target-feature +avx512f -fopenmp -x c++ -triple %itanium_abi_triple -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=WITHFEATURE

// expected-no-diagnostics

// Test taken from PR46338 (by linna su)

// Codegen test for `#pragma omp declare variant ... match(device={isa(...)})`:
// the avx512_saxpy variant must be selected when the *caller's* target
// supports avx512f, whether that comes from -target-feature (WITHFEATURE
// prefix) or from a per-function target attribute (variant_caller below).

#ifndef HEADER
#define HEADER

void base_saxpy(int, float, float *, float *);
void avx512_saxpy(int, float, float *, float *);

#pragma omp declare variant(avx512_saxpy) \
    match(device = {isa(avx512f)})
void base_saxpy(int n, float s, float *x, float *y) {
#pragma omp parallel for
  for (int i = 0; i < n; i++)
    y[i] = s * x[i] + y[i];
}

void avx512_saxpy(int n, float s, float *x, float *y) {
#pragma omp parallel for simd simdlen(16) aligned(x, y : 64)
  for (int i = 0; i < n; i++)
    y[i] = s * x[i] + y[i];
}

void caller(int n, float s, float *x, float *y) {
  // GENERIC: define {{.*}}void @{{.*}}caller
  // GENERIC: call void @{{.*}}base_saxpy
  // WITHFEATURE: define {{.*}}void @{{.*}}caller
  // WITHFEATURE: call void @{{.*}}avx512_saxpy
  base_saxpy(n, s, x, y);
}

__attribute__((target("avx512f"))) void variant_caller(int n, float s, float *x, float *y) {
  // GENERIC: define {{.*}}void @{{.*}}variant_caller
  // GENERIC: call void @{{.*}}avx512_saxpy
  // WITHFEATURE: define {{.*}}void @{{.*}}variant_caller
  // WITHFEATURE: call void @{{.*}}avx512_saxpy
  base_saxpy(n, s, x, y);
}

#endif
image_handler.h
#include "parameters.h"

// Converts incoming organized lidar clouds (ouster point type) into
// range / ambient-noise / intensity images, republishes them stacked for
// visualization, and caches an organized xyz-intensity cloud
// (`cloud_track`) of size IMAGE_HEIGHT * IMAGE_WIDTH for downstream use.
class ImageHandler
{
public:

    ros::NodeHandle nh;
    ros::Publisher pub_image;

    cv::Mat image_range;      // range channel, scaled by 20 and clamped to 255
    cv::Mat image_noise;      // ambient-noise channel, clamped to 255
    cv::Mat image_intensity;  // intensity channel, clamped to 255

    pcl::PointCloud<PointType>::Ptr cloud_track;  // organized xyz-intensity cloud

    ImageHandler()
    {
        cloud_track.reset(new pcl::PointCloud<PointType>());
        cloud_track->resize(IMAGE_HEIGHT * IMAGE_WIDTH);
        pub_image = nh.advertise<sensor_msgs::Image>("loop_detector/image_stack", 1);
    }

    // Cloud callback: unpack an organized cloud into the three images and
    // refresh cloud_track.  The cloud is expected row-major with
    // IMAGE_HEIGHT rows of IMAGE_WIDTH points.
    void cloud_handler(const sensor_msgs::PointCloud2ConstPtr &cloud_msg)
    {
        // convert cloud
        pcl::PointCloud<PointOuster>::Ptr laser_cloud(new pcl::PointCloud<PointOuster>());
        pcl::fromROSMsg(*cloud_msg, *laser_cloud);

        // The cloud size must be a multiple of HEIGHT*WIDTH.
        // BUG FIX: parenthesized the divisor — '%' and '*' share precedence
        // and group left-to-right, so the original unparenthesized form
        // evaluated (size % IMAGE_HEIGHT) * IMAGE_WIDTH == 0 instead.
        assert((int)laser_cloud->size() % (IMAGE_HEIGHT * IMAGE_WIDTH) == 0);

        // reset images
        image_range     = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
        image_noise     = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));
        image_intensity = cv::Mat(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC1, cv::Scalar(0));

        // Rows are independent, so the pixel loop parallelizes cleanly.
        #pragma omp parallel for num_threads(NUM_THREADS)
        for (int u = 0; u < IMAGE_HEIGHT; u++)
        {
            for (int v = 0; v < IMAGE_WIDTH; v++)
            {
                const auto& pt = laser_cloud->points[u * IMAGE_WIDTH + v];

                // extract sensor data
                float range = std::sqrt(pt.x*pt.x + pt.y*pt.y + pt.z*pt.z);
                float noise = pt.noise;
                float intensity = pt.intensity;

                // limit to (0~255)
                noise = std::min(noise, 255.0f);
                intensity = std::min(intensity, 255.0f);

                // update all images
                image_range.at<uint8_t>(u, v) = std::min(range * 20, 255.0f);
                image_noise.at<uint8_t>(u, v) = noise;
                image_intensity.at<uint8_t>(u, v) = intensity;

                // update cloud; points closer than 0.1 m are treated as
                // invalid returns and zeroed out.
                PointType* p = &cloud_track->points[u * IMAGE_WIDTH + v];

                if (range >= 0.1)
                {
                    p->x = pt.x;
                    p->y = pt.y;
                    p->z = pt.z;
                    p->intensity = intensity;
                }
                else
                {
                    p->x = p->y = p->z = p->intensity = 0;
                }
            }
        }

        if (pub_image.getNumSubscribers() != 0)
        {
            // option 1: display intensity image
            // cv::Mat image_visualization = image_intensity.clone();
            // cv::cvtColor(image_visualization, image_visualization, CV_GRAY2RGB);
            // pubImage(&pub_image, image_visualization, cloud_msg->header, "bgr8");

            // option 2: display all images from available lidar channels
            cv::Mat image_visualization;
            cv::vconcat(image_noise, image_intensity, image_visualization);
            cv::vconcat(image_visualization, image_range, image_visualization);
            cv::cvtColor(image_visualization, image_visualization, CV_GRAY2RGB);
            cv::putText(image_visualization, "Ambient",   cv::Point2f(5, 20 + IMAGE_HEIGHT*0), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
            cv::putText(image_visualization, "Intensity", cv::Point2f(5, 20 + IMAGE_HEIGHT*1), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
            cv::putText(image_visualization, "Range",     cv::Point2f(5, 20 + IMAGE_HEIGHT*2), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(255,0,255), 2);
            pubImage(&pub_image, image_visualization, cloud_msg->header, "bgr8");
        }

        // static tf in case tf between base_link and lidar is missing
        static tf::TransformBroadcaster tf_base_to_lidar;
        static tf::Transform base_to_lidar = tf::Transform(tf::createQuaternionFromRPY(0, 0, 0), tf::Vector3(0, 0, 0));
        tf_base_to_lidar.sendTransform(tf::StampedTransform(base_to_lidar, cloud_msg->header.stamp, "base_link", "velodyne"));
    }

    // Publish a cv::Mat through cv_bridge with the given header/encoding.
    // NOTE(review): the static bridge makes this non-reentrant — fine for a
    // single-threaded subscriber queue; confirm if callbacks ever overlap.
    void pubImage(ros::Publisher *this_pub, const cv::Mat& this_image, std_msgs::Header this_header, string image_format)
    {
        static cv_bridge::CvImage bridge;
        bridge.header = this_header;
        bridge.encoding = image_format;
        bridge.image = this_image;
        this_pub->publish(bridge.toImageMsg());
    }
};
bfecc_convection.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___ // / __/ _ \| \| \ \ / /__| \_ _| __| __| // | (_| (_) | .` |\ V /___| |) | || _|| _| // \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_BFECC_CONVECTION_INCLUDED ) #define KRATOS_BFECC_CONVECTION_INCLUDED #define PRESSURE_ON_EULERIAN_MESH #define USE_FEW_PARTICLES // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "includes/variables.h" #include "utilities/timer.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/openmp_utils.h" namespace Kratos { template<std::size_t TDim> class BFECCConvection { public: KRATOS_CLASS_POINTER_DEFINITION(BFECCConvection<TDim>); BFECCConvection(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure) : mpSearchStructure(pSearchStructure) { } ~BFECCConvection() { } //********************************************************************************************** //********************************************************************************************** void BFECCconvect(ModelPart& rModelPart, const Variable< double >& rVar, const Variable<array_1d<double,3> >& conv_var, const double substeps) { KRATOS_TRY const double dt = rModelPart.GetProcessInfo()[DELTA_TIME]; //do movement Vector N(TDim + 1); Vector N_valid(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); PointerVector< Element > elem_backward( rModelPart.Nodes().size()); std::vector< Vector > Ns( rModelPart.Nodes().size()); std::vector< bool > found( rModelPart.Nodes().size()); // Allocate non-historical variables for (auto &r_node : 
rModelPart.Nodes()) { r_node.SetValue(rVar, 0.0); } //FIRST LOOP: estimate rVar(n+1) #pragma omp parallel for firstprivate(results,N,N_valid) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Element::Pointer pelement; Element::Pointer pelement_valid; array_1d<double,3> bckPos = iparticle->Coordinates(); const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var); bool has_valid_elem_pointer = false; bool is_found = ConvectBySubstepping(dt,bckPos,vel, N,N_valid, pelement,pelement_valid, result_begin, max_results, -1.0, substeps, conv_var, has_valid_elem_pointer); found[i] = is_found; if(is_found) { //save position backwards elem_backward(i) = pelement; Ns[i] = N; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) ); } iparticle->FastGetSolutionStepValue(rVar) = phi1; } else if(has_valid_elem_pointer) { //save position backwards elem_backward(i) = pelement_valid; Ns[i] = N_valid; Geometry< Node < 3 > >& geom = pelement_valid->GetGeometry(); double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N_valid[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) ); } iparticle->FastGetSolutionStepValue(rVar) = phi1; } } //now obtain the value AT TIME STEP N by taking it from N+1 #pragma omp parallel for firstprivate(results,N,N_valid) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Element::Pointer pelement; Element::Pointer pelement_valid; array_1d<double,3> fwdPos = iparticle->Coordinates(); const 
array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var,1); bool has_valid_elem_pointer = false; bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, N_valid, pelement, pelement_valid, result_begin, max_results, 1.0, substeps, conv_var,has_valid_elem_pointer); if(is_found) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); double phi_old = N[0] * ( geom[0].FastGetSolutionStepValue(rVar)); for (unsigned int k = 1; k < geom.size(); k++) { phi_old += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) ); } //store correction iparticle->GetValue(rVar) = 1.5*iparticle->FastGetSolutionStepValue(rVar,1) - 0.5 * phi_old; // iparticle->FastGetSolutionStepValue(rVar) = iparticle->GetValue(rVar) - 0.5 * (phi2 - iparticle->FastGetSolutionStepValue(rVar,1)); } else { iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1); } } #pragma omp parallel for for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; bool is_found = found[i]; if(is_found) { Vector N = Ns[i]; Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry(); double phi1 = N[0] * ( geom[0].GetValue(rVar)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N[k] * ( geom[k].GetValue(rVar) ); } iparticle->FastGetSolutionStepValue(rVar) = phi1; } // else // std::cout << "it should find it" << std::endl; } KRATOS_CATCH("") } bool ConvectBySubstepping( const double dt, array_1d<double,3>& position, //IT WILL BE MODIFIED const array_1d<double,3>& initial_velocity, Vector& N, Vector& N_valid, Element::Pointer& pelement, Element::Pointer& pelement_valid, typename BinBasedFastPointLocator<TDim>::ResultIteratorType& result_begin, const unsigned int max_results, const double velocity_sign, const double subdivisions, const Variable<array_1d<double,3> >& conv_var, bool& has_valid_elem_pointer) { bool is_found = false; array_1d<double,3> veulerian; const double small_dt = dt/subdivisions; if(velocity_sign > 0.0) //going 
from the past to the future { noalias(position) += small_dt*initial_velocity; unsigned int substep=0; while(substep++ < subdivisions) { is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = static_cast<double>(substep)/subdivisions; const double old_step_factor = (1.0 - new_step_factor); noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) ); noalias(position) += small_dt*veulerian; N_valid = N; pelement_valid = pelement; has_valid_elem_pointer = true; } else break; } } else //going from the future to the past { noalias(position) -= small_dt*initial_velocity; unsigned int substep=0; while(substep++ < subdivisions) { is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //this factors get inverted from the other case const double old_step_factor = static_cast<double>(substep)/subdivisions; const double new_step_factor = (1.0 - old_step_factor); noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) ); noalias(position) -= small_dt*veulerian; N_valid = N; pelement_valid = pelement; has_valid_elem_pointer = true; } else break; } } return is_found; } void ResetBoundaryConditions(ModelPart& rModelPart, const 
Variable< double >& rVar) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(rVar)) { inode->FastGetSolutionStepValue(rVar)=inode->GetSolutionStepValue(rVar,1); } } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(ModelPart& rModelPart, const Variable< double >& rVar) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->GetSolutionStepValue(rVar,1) = inode->FastGetSolutionStepValue(rVar); } } KRATOS_CATCH("") } private: typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure; }; } // namespace Kratos. #endif // KRATOS_BFECC_CONVECTION_INCLUDED defined
// ExtraFullGraphSearcher.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef _SPTAG_SPANN_EXTRASEARCHER_H_
#define _SPTAG_SPANN_EXTRASEARCHER_H_

#include "inc/Helper/VectorSetReader.h"
#include "inc/Helper/AsyncFileReader.h"
#include "IExtraSearcher.h"
#include "../Common/TruthSet.h"

#include <map>
#include <cmath>
#include <climits>
#include <future>

namespace SPTAG {
    namespace SPANN {
        // Factory producing asynchronous disk I/O objects; defined elsewhere.
        extern std::function<std::shared_ptr<Helper::DiskPriorityIO>(void)> f_createAsyncIO;

        // Edge-selection buffer that holds either the whole selection set or,
        // when processing in batches, one resident window [m_start, m_end)
        // while the rest lives in a temporary file on disk.
        struct Selection {
            std::string m_tmpfile;            // on-disk backing file used for batching
            size_t m_totalsize;               // total number of selections across all batches
            size_t m_start;                   // global index of the first resident selection
            size_t m_end;                     // one past the last resident global index
            std::vector<Edge> m_selections;   // in-memory window [m_start, m_end)
            static EdgeCompare g_edgeComparer;

            // Starts fully resident ([0, totalsize)); removes any stale temp file.
            Selection(size_t totalsize, std::string tmpdir) : m_tmpfile(tmpdir + FolderSep + "selection_tmp"), m_totalsize(totalsize), m_start(0), m_end(totalsize) { remove(m_tmpfile.c_str()); m_selections.resize(totalsize); }

            // Writes the resident window to m_tmpfile at its global byte offset
            // (sizeof(Edge) * m_start) and releases the in-memory buffer.
            // exit(1) on any I/O failure.
            void SaveBatch()
            {
                auto f_out = f_createIO();
                // Open read-write if the file already exists so earlier batches
                // are preserved; otherwise create it fresh.
                if (f_out == nullptr || !f_out->Initialize(m_tmpfile.c_str(), std::ios::out | std::ios::binary | (fileexists(m_tmpfile.c_str()) ? std::ios::in : 0))) {
                    LOG(Helper::LogLevel::LL_Error, "Cannot open %s to save selection for batching!\n", m_tmpfile.c_str());
                    exit(1);
                }

                if (f_out->WriteBinary(sizeof(Edge) * (m_end - m_start), (const char*)m_selections.data(), sizeof(Edge) * m_start) != sizeof(Edge) * (m_end - m_start)) {
                    LOG(Helper::LogLevel::LL_Error, "Cannot write to %s!\n", m_tmpfile.c_str());
                    exit(1);
                }
                // Swap with an empty vector to release capacity (clear() would keep it).
                std::vector<Edge> batch_selection;
                m_selections.swap(batch_selection);
                m_start = m_end = 0;
            }

            // Loads global range [start, end) from the temp file into memory,
            // making it the new resident window. exit(1) on any I/O failure.
            void LoadBatch(size_t start, size_t end)
            {
                auto f_in = f_createIO();
                if (f_in == nullptr || !f_in->Initialize(m_tmpfile.c_str(), std::ios::in | std::ios::binary)) {
                    LOG(Helper::LogLevel::LL_Error, "Cannot open %s to load selection batch!\n", m_tmpfile.c_str());
                    exit(1);
                }

                size_t readsize = end - start;
                m_selections.resize(readsize);
                if (f_in->ReadBinary(readsize * sizeof(Edge), (char*)m_selections.data(), start * sizeof(Edge)) != readsize * sizeof(Edge)) {
                    LOG(Helper::LogLevel::LL_Error, "Cannot read from %s! start:%zu size:%zu\n", m_tmpfile.c_str(), start, readsize);
                    exit(1);
                }
                m_start = start;
                m_end = end;
            }

            // Returns the GLOBAL index of the first resident selection whose
            // head node is >= node (binary search via g_edgeComparer).
            size_t lower_bound(SizeType node)
            {
                auto ptr = std::lower_bound(m_selections.begin(), m_selections.end(), node, g_edgeComparer);
                return m_start + (ptr - m_selections.begin());
            }

            // Access by GLOBAL offset; offset must lie inside the resident window.
            // NOTE(review): an out-of-window offset is only logged -- the
            // indexing below still executes and is then out of bounds.
            Edge& operator[](size_t offset)
            {
                if (offset < m_start || offset >= m_end) {
                    LOG(Helper::LogLevel::LL_Error, "Error read offset in selections:%zu\n", offset);
                }
                return m_selections[offset - m_start];
            }
        };

        // Scans one posting list in `buffer` (vectorInfoSize bytes per entry:
        // an int vector ID followed by the vector data), skips IDs already
        // seen via the workspace deduper, and adds each remaining candidate's
        // distance to queryResults. Relies on buffer/listInfo/p_exWorkSpace/
        // p_index/queryResults being in scope at the expansion site.
#define ProcessPosting(vectorInfoSize) \
        for (char *vectorInfo = buffer + listInfo->pageOffset, *vectorInfoEnd = vectorInfo + listInfo->listEleCount * vectorInfoSize; vectorInfo < vectorInfoEnd; vectorInfo += vectorInfoSize) { \
            int vectorID = *(reinterpret_cast<int*>(vectorInfo)); \
            if (p_exWorkSpace->m_deduper.CheckAndSet(vectorID)) continue; \
            auto distance2leaf = p_index->ComputeDistance(queryResults.GetQuantizedTarget(), vectorInfo + sizeof(int)); \
            queryResults.AddPoint(vectorID, distance2leaf); \
        } \

        template
<typename ValueType> class ExtraFullGraphSearcher : public IExtraSearcher { public: ExtraFullGraphSearcher() { } virtual ~ExtraFullGraphSearcher() { } virtual bool LoadIndex(Options& p_opt) { m_extraFullGraphFile = p_opt.m_indexDirectory + FolderSep + p_opt.m_ssdIndex; std::string curFile = m_extraFullGraphFile; do { auto curIndexFile = f_createAsyncIO(); if (curIndexFile == nullptr || !curIndexFile->Initialize(curFile.c_str(), std::ios::binary | std::ios::in, #ifdef BATCH_READ p_opt.m_searchInternalResultNum, 2, 2, p_opt.m_iSSDNumberOfThreads #else p_opt.m_searchInternalResultNum * p_opt.m_iSSDNumberOfThreads / p_opt.m_ioThreads + 1, 2, 2, p_opt.m_ioThreads #endif )) { LOG(Helper::LogLevel::LL_Error, "Cannot open file:%s!\n", curFile.c_str()); return false; } m_indexFiles.emplace_back(curIndexFile); m_listInfos.emplace_back(0); m_totalListCount += LoadingHeadInfo(curFile, p_opt.m_searchPostingPageLimit, m_listInfos.back()); curFile = m_extraFullGraphFile + "_" + std::to_string(m_indexFiles.size()); } while (fileexists(curFile.c_str())); m_listPerFile = static_cast<int>((m_totalListCount + m_indexFiles.size() - 1) / m_indexFiles.size()); #ifndef _MSC_VER Helper::AIOTimeout.tv_nsec = p_opt.m_iotimeout * 1000; #endif return true; } virtual void SearchIndex(ExtraWorkSpace* p_exWorkSpace, QueryResult& p_queryResults, std::shared_ptr<VectorIndex> p_index, SearchStats* p_stats, std::set<int>* truth, std::map<int, std::set<int>>* found) { const uint32_t postingListCount = static_cast<uint32_t>(p_exWorkSpace->m_postingIDs.size()); p_exWorkSpace->m_deduper.clear(); COMMON::QueryResultSet<ValueType>& queryResults = *((COMMON::QueryResultSet<ValueType>*)&p_queryResults); int diskRead = 0; int diskIO = 0; int listElements = 0; #if defined(ASYNC_READ) && !defined(BATCH_READ) int unprocessed = 0; #endif bool oneContext = (m_indexFiles.size() == 1); for (uint32_t pi = 0; pi < postingListCount; ++pi) { auto curPostingID = p_exWorkSpace->m_postingIDs[pi]; int fileid = 0; ListInfo* 
listInfo; if (oneContext) { listInfo = &(m_listInfos[0][curPostingID]); } else { fileid = curPostingID / m_listPerFile; listInfo = &(m_listInfos[fileid][curPostingID % m_listPerFile]); } #ifndef BATCH_READ Helper::DiskPriorityIO* indexFile = m_indexFiles[fileid].get(); #endif if (listInfo->listEleCount == 0) { continue; } diskRead += listInfo->listPageCount; diskIO += 1; listElements += listInfo->listEleCount; size_t totalBytes = (static_cast<size_t>(listInfo->listPageCount) << PageSizeEx); char* buffer = (char*)((p_exWorkSpace->m_pageBuffers[pi]).GetBuffer()); #ifdef ASYNC_READ auto& request = p_exWorkSpace->m_diskRequests[pi]; request.m_offset = listInfo->listOffset; request.m_readSize = totalBytes; request.m_buffer = buffer; request.m_status = (fileid << 16) | p_exWorkSpace->m_spaceID; request.m_payload = (void*)listInfo; #ifdef BATCH_READ auto vectorInfoSize = m_vectorInfoSize; request.m_callback = [&p_exWorkSpace, &queryResults, &p_index, vectorInfoSize](Helper::AsyncReadRequest* request) { request->m_readSize = 0; char* buffer = request->m_buffer; ListInfo* listInfo = (ListInfo*)(request->m_payload); ProcessPosting(vectorInfoSize) }; #else request.m_callback = [&p_exWorkSpace](Helper::AsyncReadRequest* request) { p_exWorkSpace->m_processIocp.push(request); }; ++unprocessed; if (!(indexFile->ReadFileAsync(request))) { LOG(Helper::LogLevel::LL_Error, "Failed to read file!\n"); unprocessed--; } #endif #else auto numRead = indexFile->ReadBinary(totalBytes, buffer, listInfo->listOffset); if (numRead != totalBytes) { LOG(Helper::LogLevel::LL_Error, "File %s read bytes, expected: %zu, acutal: %llu.\n", m_extraFullGraphFile.c_str(), totalBytes, numRead); exit(-1); } ProcessPosting(m_vectorInfoSize) #endif } #ifdef ASYNC_READ #ifdef BATCH_READ BatchReadFileAsync(m_indexFiles, (p_exWorkSpace->m_diskRequests).data(), postingListCount); #else while (unprocessed > 0) { Helper::AsyncReadRequest* request; if (!(p_exWorkSpace->m_processIocp.pop(request))) break; 
--unprocessed; char* buffer = request->m_buffer; ListInfo* listInfo = static_cast<ListInfo*>(request->m_payload); ProcessPosting(m_vectorInfoSize) } #endif #endif if (truth) { for (uint32_t pi = 0; pi < postingListCount; ++pi) { auto curPostingID = p_exWorkSpace->m_postingIDs[pi]; ListInfo* listInfo = &(m_listInfos[curPostingID / m_listPerFile][curPostingID % m_listPerFile]); char* buffer = (char*)((p_exWorkSpace->m_pageBuffers[pi]).GetBuffer()); for (int i = 0; i < listInfo->listEleCount; ++i) { char* vectorInfo = buffer + listInfo->pageOffset + i * m_vectorInfoSize; int vectorID = *(reinterpret_cast<int*>(vectorInfo)); if (truth && truth->count(vectorID)) (*found)[curPostingID].insert(vectorID); } } } if (p_stats) { p_stats->m_totalListElementsCount = listElements; p_stats->m_diskIOCount = diskIO; p_stats->m_diskAccessCount = diskRead; } } bool BuildIndex(std::shared_ptr<Helper::VectorSetReader>& p_reader, std::shared_ptr<VectorIndex> p_headIndex, Options& p_opt) { std::string outputFile = p_opt.m_indexDirectory + FolderSep + p_opt.m_ssdIndex; if (outputFile.empty()) { LOG(Helper::LogLevel::LL_Error, "Output file can't be empty!\n"); return false; } int numThreads = p_opt.m_iSSDNumberOfThreads; int candidateNum = p_opt.m_internalResultNum; std::unordered_set<SizeType> headVectorIDS; if (p_opt.m_headIDFile.empty()) { LOG(Helper::LogLevel::LL_Error, "Not found VectorIDTranslate!\n"); return false; } { auto ptr = SPTAG::f_createIO(); if (ptr == nullptr || !ptr->Initialize((p_opt.m_indexDirectory + FolderSep + p_opt.m_headIDFile).c_str(), std::ios::binary | std::ios::in)) { LOG(Helper::LogLevel::LL_Error, "failed open VectorIDTranslate: %s\n", p_opt.m_headIDFile.c_str()); return false; } std::uint64_t vid; while (ptr->ReadBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) == sizeof(vid)) { headVectorIDS.insert(static_cast<SizeType>(vid)); } LOG(Helper::LogLevel::LL_Info, "Loaded %u Vector IDs\n", static_cast<uint32_t>(headVectorIDS.size())); } SizeType fullCount = 0; 
size_t vectorInfoSize = 0; { auto fullVectors = p_reader->GetVectorSet(); fullCount = fullVectors->Count(); vectorInfoSize = fullVectors->PerVectorDataSize() + sizeof(int); } Selection selections(static_cast<size_t>(fullCount) * p_opt.m_replicaCount, p_opt.m_tmpdir); LOG(Helper::LogLevel::LL_Info, "Full vector count:%d Edge bytes:%llu selection size:%zu, capacity size:%zu\n", fullCount, sizeof(Edge), selections.m_selections.size(), selections.m_selections.capacity()); std::vector<std::atomic_int> replicaCount(fullCount); std::vector<std::atomic_int> postingListSize(headVectorIDS.size()); for (auto& pls : postingListSize) pls = 0; std::unordered_set<SizeType> emptySet; SizeType batchSize = (fullCount + p_opt.m_batches - 1) / p_opt.m_batches; auto t1 = std::chrono::high_resolution_clock::now(); if (p_opt.m_batches > 1) selections.SaveBatch(); { LOG(Helper::LogLevel::LL_Info, "Preparation done, start candidate searching.\n"); SizeType sampleSize = p_opt.m_samples; std::vector<SizeType> samples(sampleSize, 0); for (int i = 0; i < p_opt.m_batches; i++) { SizeType start = i * batchSize; SizeType end = min(start + batchSize, fullCount); auto fullVectors = p_reader->GetVectorSet(start, end); if (p_opt.m_distCalcMethod == DistCalcMethod::Cosine && !p_reader->IsNormalized()) fullVectors->Normalize(p_opt.m_iSSDNumberOfThreads); if (p_opt.m_batches > 1) { selections.LoadBatch(static_cast<size_t>(start) * p_opt.m_replicaCount, static_cast<size_t>(end) * p_opt.m_replicaCount); emptySet.clear(); for (auto vid : headVectorIDS) { if (vid >= start && vid < end) emptySet.insert(vid - start); } } else { emptySet = headVectorIDS; } int sampleNum = 0; for (int j = start; j < end && sampleNum < sampleSize; j++) { if (headVectorIDS.count(j) == 0) samples[sampleNum++] = j - start; } float acc = 0; #pragma omp parallel for schedule(dynamic) for (int j = 0; j < sampleNum; j++) { COMMON::Utils::atomic_float_add(&acc, COMMON::TruthSet::CalculateRecall(p_headIndex.get(), 
fullVectors->GetVector(samples[j]), candidateNum)); } acc = acc / sampleNum; LOG(Helper::LogLevel::LL_Info, "Batch %d vector(%d,%d) loaded with %d vectors (%zu) HeadIndex acc @%d:%f.\n", i, start, end, fullVectors->Count(), selections.m_selections.size(), candidateNum, acc); p_headIndex->ApproximateRNG(fullVectors, emptySet, candidateNum, selections.m_selections.data(), p_opt.m_replicaCount, numThreads, p_opt.m_gpuSSDNumTrees, p_opt.m_gpuSSDLeafSize, p_opt.m_rngFactor, p_opt.m_numGPUs); for (SizeType j = start; j < end; j++) { replicaCount[j] = 0; size_t vecOffset = j * (size_t)p_opt.m_replicaCount; if (headVectorIDS.count(j) == 0) { for (int resNum = 0; resNum < p_opt.m_replicaCount && selections[vecOffset + resNum].node != INT_MAX; resNum++) { ++postingListSize[selections[vecOffset + resNum].node]; selections[vecOffset + resNum].tonode = j; ++replicaCount[j]; } } } if (p_opt.m_batches > 1) selections.SaveBatch(); } } auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Searching replicas ended. 
Search Time: %.2lf mins\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) / 60.0); if (p_opt.m_batches > 1) selections.LoadBatch(0, static_cast<size_t>(fullCount) * p_opt.m_replicaCount); // Sort results either in CPU or GPU VectorIndex::SortSelections(&selections.m_selections); auto t3 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Time to sort selections:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count()) / 1000); int postingSizeLimit = INT_MAX; if (p_opt.m_postingPageLimit > 0) { postingSizeLimit = static_cast<int>(p_opt.m_postingPageLimit * PageSize / vectorInfoSize); } LOG(Helper::LogLevel::LL_Info, "Posting size limit: %d\n", postingSizeLimit); { std::vector<int> replicaCountDist(p_opt.m_replicaCount + 1, 0); for (int i = 0; i < replicaCount.size(); ++i) { if (headVectorIDS.count(i) > 0) continue; ++replicaCountDist[replicaCount[i]]; } LOG(Helper::LogLevel::LL_Info, "Before Posting Cut:\n"); for (int i = 0; i < replicaCountDist.size(); ++i) { LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]); } } #pragma omp parallel for schedule(dynamic) for (int i = 0; i < postingListSize.size(); ++i) { if (postingListSize[i] <= postingSizeLimit) continue; std::size_t selectIdx = std::lower_bound(selections.m_selections.begin(), selections.m_selections.end(), i, Selection::g_edgeComparer) - selections.m_selections.begin(); for (size_t dropID = postingSizeLimit; dropID < postingListSize[i]; ++dropID) { int tonode = selections.m_selections[selectIdx + dropID].tonode; --replicaCount[tonode]; } postingListSize[i] = postingSizeLimit; } if (p_opt.m_outputEmptyReplicaID) { std::vector<int> replicaCountDist(p_opt.m_replicaCount + 1, 0); auto ptr = SPTAG::f_createIO(); if (ptr == nullptr || !ptr->Initialize("EmptyReplicaID.bin", std::ios::binary | std::ios::out)) { 
LOG(Helper::LogLevel::LL_Error, "Fail to create EmptyReplicaID.bin!\n"); return false; } for (int i = 0; i < replicaCount.size(); ++i) { if (headVectorIDS.count(i) > 0) continue; ++replicaCountDist[replicaCount[i]]; if (replicaCount[i] < 2) { long long vid = i; if (ptr->WriteBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) != sizeof(vid)) { LOG(Helper::LogLevel::LL_Error, "Failt to write EmptyReplicaID.bin!"); return false; } } } LOG(Helper::LogLevel::LL_Info, "After Posting Cut:\n"); for (int i = 0; i < replicaCountDist.size(); ++i) { LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]); } } auto t4 = std::chrono::high_resolution_clock::now(); LOG(SPTAG::Helper::LogLevel::LL_Info, "Time to perform posting cut:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t4 - t3).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count()) / 1000); size_t postingFileSize = (postingListSize.size() + p_opt.m_ssdIndexFileNum - 1) / p_opt.m_ssdIndexFileNum; std::vector<size_t> selectionsBatchOffset(p_opt.m_ssdIndexFileNum + 1, 0); for (int i = 0; i < p_opt.m_ssdIndexFileNum; i++) { size_t curPostingListEnd = min(postingListSize.size(), (i + 1) * postingFileSize); selectionsBatchOffset[i + 1] = std::lower_bound(selections.m_selections.begin(), selections.m_selections.end(), (SizeType)curPostingListEnd, Selection::g_edgeComparer) - selections.m_selections.begin(); } if (p_opt.m_ssdIndexFileNum > 1) selections.SaveBatch(); auto fullVectors = p_reader->GetVectorSet(); if (p_opt.m_distCalcMethod == DistCalcMethod::Cosine && !p_reader->IsNormalized()) fullVectors->Normalize(p_opt.m_iSSDNumberOfThreads); for (int i = 0; i < p_opt.m_ssdIndexFileNum; i++) { size_t curPostingListOffSet = i * postingFileSize; size_t curPostingListEnd = min(postingListSize.size(), (i + 1) * postingFileSize); std::vector<int> curPostingListSizes( postingListSize.begin() + curPostingListOffSet, postingListSize.begin() + 
curPostingListEnd); std::unique_ptr<int[]> postPageNum; std::unique_ptr<std::uint16_t[]> postPageOffset; std::vector<int> postingOrderInIndex; SelectPostingOffset(vectorInfoSize, curPostingListSizes, postPageNum, postPageOffset, postingOrderInIndex); if (p_opt.m_ssdIndexFileNum > 1) selections.LoadBatch(selectionsBatchOffset[i], selectionsBatchOffset[i + 1]); OutputSSDIndexFile((i == 0) ? outputFile : outputFile + "_" + std::to_string(i), vectorInfoSize, curPostingListSizes, selections, postPageNum, postPageOffset, postingOrderInIndex, fullVectors, curPostingListOffSet); } auto t5 = std::chrono::high_resolution_clock::now(); double elapsedSeconds = std::chrono::duration_cast<std::chrono::seconds>(t5 - t1).count(); LOG(Helper::LogLevel::LL_Info, "Total used time: %.2lf minutes (about %.2lf hours).\n", elapsedSeconds / 60.0, elapsedSeconds / 3600.0); return true; } private: struct ListInfo { int listEleCount = 0; std::uint16_t listPageCount = 0; std::uint64_t listOffset = 0; std::uint16_t pageOffset = 0; }; int LoadingHeadInfo(const std::string& p_file, int p_postingPageLimit, std::vector<ListInfo>& m_listInfos) { auto ptr = SPTAG::f_createIO(); if (ptr == nullptr || !ptr->Initialize(p_file.c_str(), std::ios::binary | std::ios::in)) { LOG(Helper::LogLevel::LL_Error, "Failed to open file: %s\n", p_file.c_str()); exit(1); } int m_listCount; int m_totalDocumentCount; int m_iDataDimension; int m_listPageOffset; if (ptr->ReadBinary(sizeof(m_listCount), reinterpret_cast<char*>(&m_listCount)) != sizeof(m_listCount)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } if (ptr->ReadBinary(sizeof(m_totalDocumentCount), reinterpret_cast<char*>(&m_totalDocumentCount)) != sizeof(m_totalDocumentCount)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } if (ptr->ReadBinary(sizeof(m_iDataDimension), reinterpret_cast<char*>(&m_iDataDimension)) != sizeof(m_iDataDimension)) { LOG(Helper::LogLevel::LL_Error, "Failed to read 
head info file!\n"); exit(1); } if (ptr->ReadBinary(sizeof(m_listPageOffset), reinterpret_cast<char*>(&m_listPageOffset)) != sizeof(m_listPageOffset)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } if (m_vectorInfoSize == 0) m_vectorInfoSize = m_iDataDimension * sizeof(ValueType) + sizeof(int); else if (m_vectorInfoSize != m_iDataDimension * sizeof(ValueType) + sizeof(int)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file! DataDimension and ValueType are not match!\n"); exit(1); } m_listInfos.resize(m_listCount); size_t totalListElementCount = 0; std::map<int, int> pageCountDist; size_t biglistCount = 0; size_t biglistElementCount = 0; int pageNum; for (int i = 0; i < m_listCount; ++i) { if (ptr->ReadBinary(sizeof(pageNum), reinterpret_cast<char*>(&(pageNum))) != sizeof(pageNum)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } if (ptr->ReadBinary(sizeof(m_listInfos[i].pageOffset), reinterpret_cast<char*>(&(m_listInfos[i].pageOffset))) != sizeof(m_listInfos[i].pageOffset)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } if (ptr->ReadBinary(sizeof(m_listInfos[i].listEleCount), reinterpret_cast<char*>(&(m_listInfos[i].listEleCount))) != sizeof(m_listInfos[i].listEleCount)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } if (ptr->ReadBinary(sizeof(m_listInfos[i].listPageCount), reinterpret_cast<char*>(&(m_listInfos[i].listPageCount))) != sizeof(m_listInfos[i].listPageCount)) { LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n"); exit(1); } m_listInfos[i].listOffset = (static_cast<uint64_t>(m_listPageOffset + pageNum) << PageSizeEx); m_listInfos[i].listEleCount = min(m_listInfos[i].listEleCount, (min(static_cast<int>(m_listInfos[i].listPageCount), p_postingPageLimit) << PageSizeEx) / m_vectorInfoSize); m_listInfos[i].listPageCount = static_cast<std::uint16_t>(ceil((m_vectorInfoSize * 
m_listInfos[i].listEleCount + m_listInfos[i].pageOffset) * 1.0 / (1 << PageSizeEx))); totalListElementCount += m_listInfos[i].listEleCount; int pageCount = m_listInfos[i].listPageCount; if (pageCount > 1) { ++biglistCount; biglistElementCount += m_listInfos[i].listEleCount; } if (pageCountDist.count(pageCount) == 0) { pageCountDist[pageCount] = 1; } else { pageCountDist[pageCount] += 1; } } LOG(Helper::LogLevel::LL_Info, "Finish reading header info, list count %d, total doc count %d, dimension %d, list page offset %d.\n", m_listCount, m_totalDocumentCount, m_iDataDimension, m_listPageOffset); LOG(Helper::LogLevel::LL_Info, "Big page (>4K): list count %zu, total element count %zu.\n", biglistCount, biglistElementCount); LOG(Helper::LogLevel::LL_Info, "Total Element Count: %llu\n", totalListElementCount); for (auto& ele : pageCountDist) { LOG(Helper::LogLevel::LL_Info, "Page Count Dist: %d %d\n", ele.first, ele.second); } return m_listCount; } void SelectPostingOffset(size_t p_spacePerVector, const std::vector<int>& p_postingListSizes, std::unique_ptr<int[]>& p_postPageNum, std::unique_ptr<std::uint16_t[]>& p_postPageOffset, std::vector<int>& p_postingOrderInIndex) { p_postPageNum.reset(new int[p_postingListSizes.size()]); p_postPageOffset.reset(new std::uint16_t[p_postingListSizes.size()]); struct PageModWithID { int id; std::uint16_t rest; }; struct PageModeWithIDCmp { bool operator()(const PageModWithID& a, const PageModWithID& b) const { return a.rest == b.rest ? 
a.id < b.id : a.rest > b.rest; } }; std::set<PageModWithID, PageModeWithIDCmp> listRestSize; p_postingOrderInIndex.clear(); p_postingOrderInIndex.reserve(p_postingListSizes.size()); PageModWithID listInfo; for (size_t i = 0; i < p_postingListSizes.size(); ++i) { if (p_postingListSizes[i] == 0) { continue; } listInfo.id = static_cast<int>(i); listInfo.rest = static_cast<std::uint16_t>((p_spacePerVector * p_postingListSizes[i]) % PageSize); listRestSize.insert(listInfo); } listInfo.id = -1; int currPageNum = 0; std::uint16_t currOffset = 0; while (!listRestSize.empty()) { listInfo.rest = PageSize - currOffset; auto iter = listRestSize.lower_bound(listInfo); if (iter == listRestSize.end()) { ++currPageNum; currOffset = 0; } else { p_postPageNum[iter->id] = currPageNum; p_postPageOffset[iter->id] = currOffset; p_postingOrderInIndex.push_back(iter->id); currOffset += iter->rest; if (currOffset > PageSize) { LOG(Helper::LogLevel::LL_Error, "Crossing extra pages\n"); exit(1); } if (currOffset == PageSize) { ++currPageNum; currOffset = 0; } currPageNum += static_cast<int>((p_spacePerVector * p_postingListSizes[iter->id]) / PageSize); listRestSize.erase(iter); } } LOG(Helper::LogLevel::LL_Info, "TotalPageNumbers: %d, IndexSize: %llu\n", currPageNum, static_cast<uint64_t>(currPageNum) * PageSize + currOffset); } void OutputSSDIndexFile(const std::string& p_outputFile, size_t p_spacePerVector, const std::vector<int>& p_postingListSizes, Selection& p_postingSelections, const std::unique_ptr<int[]>& p_postPageNum, const std::unique_ptr<std::uint16_t[]>& p_postPageOffset, const std::vector<int>& p_postingOrderInIndex, std::shared_ptr<VectorSet> p_fullVectors, size_t p_postingListOffset) { LOG(Helper::LogLevel::LL_Info, "Start output...\n"); auto t1 = std::chrono::high_resolution_clock::now(); auto ptr = SPTAG::f_createIO(); int retry = 3; while (retry > 0 && (ptr == nullptr || !ptr->Initialize(p_outputFile.c_str(), std::ios::binary | std::ios::out))) { 
LOG(Helper::LogLevel::LL_Error, "Failed open file %s\n", p_outputFile.c_str()); retry--; } if (ptr == nullptr || !ptr->Initialize(p_outputFile.c_str(), std::ios::binary | std::ios::out)) { LOG(Helper::LogLevel::LL_Error, "Failed open file %s\n", p_outputFile.c_str()); exit(1); } std::uint64_t listOffset = sizeof(int) * 4; listOffset += (sizeof(int) + sizeof(std::uint16_t) + sizeof(int) + sizeof(std::uint16_t)) * p_postingListSizes.size(); std::unique_ptr<char[]> paddingVals(new char[PageSize]); memset(paddingVals.get(), 0, sizeof(char) * PageSize); std::uint64_t paddingSize = PageSize - (listOffset % PageSize); if (paddingSize == PageSize) { paddingSize = 0; } else { listOffset += paddingSize; } // Number of lists. int i32Val = static_cast<int>(p_postingListSizes.size()); if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } // Number of all documents. i32Val = static_cast<int>(p_fullVectors->Count()); if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } // Bytes of each vector. i32Val = static_cast<int>(p_fullVectors->Dimension()); if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } // Page offset of list content section. 
i32Val = static_cast<int>(listOffset / PageSize); if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } for (int i = 0; i < p_postingListSizes.size(); ++i) { int pageNum = 0; std::uint16_t pageOffset = 0; int listEleCount = 0; std::uint16_t listPageCount = 0; if (p_postingListSizes[i] > 0) { pageNum = p_postPageNum[i]; pageOffset = static_cast<std::uint16_t>(p_postPageOffset[i]); listEleCount = static_cast<int>(p_postingListSizes[i]); listPageCount = static_cast<std::uint16_t>((p_spacePerVector * p_postingListSizes[i]) / PageSize); if (0 != ((p_spacePerVector * p_postingListSizes[i]) % PageSize)) { ++listPageCount; } } if (ptr->WriteBinary(sizeof(pageNum), reinterpret_cast<char*>(&pageNum)) != sizeof(pageNum)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } if (ptr->WriteBinary(sizeof(pageOffset), reinterpret_cast<char*>(&pageOffset)) != sizeof(pageOffset)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } if (ptr->WriteBinary(sizeof(listEleCount), reinterpret_cast<char*>(&listEleCount)) != sizeof(listEleCount)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } if (ptr->WriteBinary(sizeof(listPageCount), reinterpret_cast<char*>(&listPageCount)) != sizeof(listPageCount)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } } if (paddingSize > 0) { if (ptr->WriteBinary(paddingSize, reinterpret_cast<char*>(paddingVals.get())) != paddingSize) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } } if (static_cast<uint64_t>(ptr->TellP()) != listOffset) { LOG(Helper::LogLevel::LL_Info, "List offset not match!\n"); exit(1); } LOG(Helper::LogLevel::LL_Info, "SubIndex Size: %llu bytes, %llu MBytes\n", listOffset, listOffset >> 20); listOffset = 0; std::uint64_t paddedSize = 0; for (auto id : p_postingOrderInIndex) { 
std::uint64_t targetOffset = static_cast<uint64_t>(p_postPageNum[id]) * PageSize + p_postPageOffset[id]; if (targetOffset < listOffset) { LOG(Helper::LogLevel::LL_Info, "List offset not match, targetOffset < listOffset!\n"); exit(1); } if (targetOffset > listOffset) { if (targetOffset - listOffset > PageSize) { LOG(Helper::LogLevel::LL_Error, "Padding size greater than page size!\n"); exit(1); } if (ptr->WriteBinary(targetOffset - listOffset, reinterpret_cast<char*>(paddingVals.get())) != targetOffset - listOffset) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } paddedSize += targetOffset - listOffset; listOffset = targetOffset; } std::size_t selectIdx = p_postingSelections.lower_bound(id + (int)p_postingListOffset); for (int j = 0; j < p_postingListSizes[id]; ++j) { if (p_postingSelections[selectIdx].node != id + (int)p_postingListOffset) { LOG(Helper::LogLevel::LL_Error, "Selection ID NOT MATCH! node:%d offset:%zu\n", id + (int)p_postingListOffset, selectIdx); exit(1); } i32Val = p_postingSelections[selectIdx++].tonode; if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } if (ptr->WriteBinary(p_fullVectors->PerVectorDataSize(), reinterpret_cast<char*>(p_fullVectors->GetVector(i32Val))) != p_fullVectors->PerVectorDataSize()) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } listOffset += p_spacePerVector; } } paddingSize = PageSize - (listOffset % PageSize); if (paddingSize == PageSize) { paddingSize = 0; } else { listOffset += paddingSize; paddedSize += paddingSize; } if (paddingSize > 0) { if (ptr->WriteBinary(paddingSize, reinterpret_cast<char*>(paddingVals.get())) != paddingSize) { LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndex File!"); exit(1); } } LOG(Helper::LogLevel::LL_Info, "Padded Size: %llu, final total size: %llu.\n", paddedSize, listOffset); 
LOG(Helper::LogLevel::LL_Info, "Output done...\n"); auto t2 = std::chrono::high_resolution_clock::now(); LOG(Helper::LogLevel::LL_Info, "Time to write results:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count()) / 1000); } private: std::string m_extraFullGraphFile; std::vector<std::vector<ListInfo>> m_listInfos; std::vector<std::shared_ptr<Helper::DiskPriorityIO>> m_indexFiles; int m_vectorInfoSize = 0; int m_totalListCount = 0; int m_listPerFile = 0; }; } // namespace SPANN } // namespace SPTAG #endif // _SPTAG_SPANN_EXTRASEARCHER_H_
mSortParallel.c
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include "omp.h"

#define MAX_SIZE 10000//number of elements

/// Fill x[0..n-1] with a strictly decreasing sequence starting at
/// MAX_SIZE — the worst case for an ascending merge sort.
void generate_list(int * x, int n) {
   int i, j = MAX_SIZE;
   for (i = 0; i < n; i++){
      x[i] = j;
      j--;
   }
}

/// Merge the two sorted halves X[0..n/2) and X[n/2..n) into tmp,
/// then copy the merged result back into X.
/// tmp must provide room for at least n ints.
void merge(int * X, int n, int * tmp) {
   int i = 0;      // cursor in the lower half
   int j = n/2;    // cursor in the upper half
   int ti = 0;     // cursor in tmp

   while (i < n/2 && j < n) {
      if (X[i] < X[j]) {
         tmp[ti++] = X[i++];
      }
      else {
         tmp[ti++] = X[j++];
      }
   }
   while (i < n/2) { /* finish up lower half */
      tmp[ti++] = X[i++];
   }
   while (j < n) { /* finish up upper half */
      tmp[ti++] = X[j++];
   }
   memcpy(X, tmp, n*sizeof(int));
}

/// Sequential top-down merge sort of arr[0..size); temp is scratch
/// space of at least size ints.
void mergeSortSeq(int arr[], int size, int temp[]){
   if (size < 2){
      return;
   }
   // Sort first and second halves, then merge them.
   mergeSortSeq(arr, size/2, temp);
   mergeSortSeq(arr+size/2, size-size/2, temp);
   merge(arr, size, temp);
}

/// Task-parallel merge sort: splits the thread budget between the two
/// halves and falls back to the sequential sort when only one thread
/// remains. Must be started from inside a parallel region (by a single
/// thread) so the tasks have a team to run on.
void mergeSortPara(int arr[], int size, int temp[], int threads){
   if (threads > 1){
      // Each task gets a disjoint slice of both arr and temp, so the
      // two tasks never touch the same memory before the taskwait.
      #pragma omp task
      mergeSortPara(arr, size/2, temp, threads/2);
      #pragma omp task
      mergeSortPara((arr+size/2), (size-size/2), (temp+size/2), (threads-threads/2));
      #pragma omp taskwait
      merge(arr, size, temp);
   }else if (threads == 1){
      mergeSortSeq(arr, size, temp);
   }
}

/// Print all elements of A on one line.
void printArray(int A[], int size){
   int i;
   for (i=0; i < size; i++)
      printf("%d ", A[i]);
   printf("\n");
}

/// Return 0 if a[0..size) is non-decreasing, 1 otherwise (and report
/// the first out-of-order pair). Intended to be called after the sort.
int isSorted(int a[], int size){
   int i, r = 0;
   for(i=0;i<size-1;i++){
      if(a[i]>a[i+1]){
         r = 1;
         printf("a[%d]=%d,a[%d]=%d \n",i,a[i],i+1,a[i+1]);
         break;
      }
   }
   return r;
}

int main(int argc , char * argv[]){
   int *data = malloc(MAX_SIZE*sizeof(int));
   int *temp = malloc(MAX_SIZE*sizeof(int));
   // Robustness fix: the original dereferenced the buffers without
   // checking that the allocations succeeded.
   if (data == NULL || temp == NULL){
      fprintf(stderr, "memory allocation failed\n");
      free(temp);
      free(data);
      return 1;
   }
   int n = MAX_SIZE;
   double start, stop;

   printf("Given array is \n");
   generate_list(data, n);
   printArray(data, n);

   start = omp_get_wtime();
   #pragma omp parallel
   {
      // Race fix: the original assigned a shared `threads` variable
      // from every thread of the team before the `single` construct —
      // a formally undefined data race. Query the team size directly
      // in the single thread that launches the sort instead.
      #pragma omp single
      mergeSortPara(data, n, temp, omp_get_num_threads());
   }
   stop = omp_get_wtime();

   printf("\nSorted array is \n");
   printArray(data, n);

   int r = isSorted(data, n);
   if(r != 0){
      printf("O array não está ordenado");
   }
   printf("\nMergeSort Time: %f\n",(stop-start));

   free(temp);
   free(data);
   return 0;
}
GB_binop__remainder_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__remainder_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__remainder_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__remainder_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__remainder_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__remainder_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__remainder_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__remainder_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__remainder_fp64) // C=scalar+B GB (_bind1st__remainder_fp64) // C=scalar+B' GB (_bind1st_tran__remainder_fp64) // C=A+scalar GB (_bind2nd__remainder_fp64) // C=A'+scalar GB (_bind2nd_tran__remainder_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = remainder (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = remainder (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_REMAINDER || GxB_NO_FP64 || GxB_NO_REMAINDER_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__remainder_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__remainder_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__remainder_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double 
*restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__remainder_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__remainder_fp64) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__remainder_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__remainder_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__remainder_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__remainder_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = remainder (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__remainder_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = remainder (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = remainder (x, aij) ; \ } GrB_Info GB (_bind1st_tran__remainder_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = remainder (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__remainder_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
timestep.c
/// \file
/// Leapfrog time integrator

#include "timestep.h"

#include <omp.h>

#include "CoMDTypes.h"
#include "linkCells.h"
#include "parallel.h"
#include "performanceTimers.h"

// Forward declarations of the two half-step kernels used by timestep().
static void advanceVelocity(SimFlat* s, int nBoxes, real_t dt);
static void advancePosition(SimFlat* s, int nBoxes, real_t dt);

/// Advance the simulation time to t+dt using a leap frog method
/// (equivalent to velocity verlet).
///
/// Forces must be computed before calling the integrator the first time.
///
/// - Advance velocities half time step using forces
/// - Advance positions full time step using velocities
/// - Update link cells and exchange remote particles
/// - Compute forces
/// - Update velocities half time step using forces
///
/// This leaves positions, velocities, and forces at t+dt, with the
/// forces ready to perform the half step velocity update at the top of
/// the next call.
///
/// After nSteps the kinetic energy is computed for diagnostic output.
///
/// \param s      Simulation state; positions, momenta, and forces are
///               updated in place.
/// \param nSteps Number of leapfrog steps to take before returning.
/// \param dt     Time step length.
/// \return The potential energy after the last step (reduced across
///         tasks by kineticEnergy()).
double timestep(SimFlat* s, int nSteps, real_t dt)
{
   for (int ii=0; ii<nSteps; ++ii)
   {
      // First half-kick: momenta advance half a step on current forces.
      startTimer(velocityTimer);
      advanceVelocity(s, s->boxes->nLocalBoxes, 0.5*dt);
      stopTimer(velocityTimer);

      // Drift: positions advance a full step using the new momenta.
      startTimer(positionTimer);
      advancePosition(s, s->boxes->nLocalBoxes, dt);
      stopTimer(positionTimer);

      // Atoms may have crossed cell boundaries; re-bin and exchange halos
      // before forces are evaluated at the new positions.
      startTimer(redistributeTimer);
      redistributeAtoms(s);
      stopTimer(redistributeTimer);

      startTimer(computeForceTimer);
      computeForce(s);
      stopTimer(computeForceTimer);

      // Second half-kick completes the velocity update with new forces.
      startTimer(velocityTimer);
      advanceVelocity(s, s->boxes->nLocalBoxes, 0.5*dt);
      stopTimer(velocityTimer);
   }

   kineticEnergy(s);

   return s->ePotential;
}

/// Dispatch to the force routine selected in the potential. Per the
/// kineticEnergy() comment below, the local potential energy is produced
/// as a by-product of this call.
void computeForce(SimFlat* s)
{
   s->pot->force(s);
}

/// Half-kick kernel: advance momenta by dt using the current forces,
/// p += dt * f, componentwise for every atom in the first nBoxes link
/// cells. Boxes are independent, so the outer loop is parallelized.
void advanceVelocity(SimFlat* s, int nBoxes, real_t dt)
{
   #pragma omp parallel for
   for (int iBox=0; iBox<nBoxes; iBox++)
   {
      // iOff indexes the flat per-box atom storage (MAXATOMS slots per box).
      for (int iOff=MAXATOMS*iBox,ii=0; ii<s->boxes->nAtoms[iBox]; ii++,iOff++)
      {
         s->atoms->p[iOff][0] += dt*s->atoms->f[iOff][0];
         s->atoms->p[iOff][1] += dt*s->atoms->f[iOff][1];
         s->atoms->p[iOff][2] += dt*s->atoms->f[iOff][2];
      }
   }
}

/// Drift kernel: advance positions by dt using the current momenta,
/// r += dt * p / m, where the mass is looked up per species.
void advancePosition(SimFlat* s, int nBoxes, real_t dt)
{
   #pragma omp parallel for
   for (int iBox=0; iBox<nBoxes; iBox++)
   {
      for (int iOff=MAXATOMS*iBox,ii=0; ii<s->boxes->nAtoms[iBox]; ii++,iOff++)
      {
         int iSpecies = s->atoms->iSpecies[iOff];
         real_t invMass = 1.0/s->species[iSpecies].mass;
         s->atoms->r[iOff][0] += dt*s->atoms->p[iOff][0]*invMass;
         s->atoms->r[iOff][1] += dt*s->atoms->p[iOff][1]*invMass;
         s->atoms->r[iOff][2] += dt*s->atoms->p[iOff][2]*invMass;
      }
   }
}

/// Calculates total kinetic and potential energy across all tasks. The
/// local potential energy is a by-product of the force routine.
void kineticEnergy(SimFlat* s)
{
   // eLocal packs [potential, kinetic] so both reduce in one message.
   real_t eLocal[2];
   real_t kenergy = 0.0;
   eLocal[0] = s->ePotential;
   eLocal[1] = 0;
   #pragma omp parallel for reduction(+:kenergy)
   for (int iBox=0; iBox<s->boxes->nLocalBoxes; iBox++)
   {
      for (int iOff=MAXATOMS*iBox,ii=0; ii<s->boxes->nAtoms[iBox]; ii++,iOff++)
      {
         int iSpecies = s->atoms->iSpecies[iOff];
         // KE = |p|^2 / (2m); the 1/2 is folded into invMass.
         real_t invMass = 0.5/s->species[iSpecies].mass;
         kenergy += ( s->atoms->p[iOff][0] * s->atoms->p[iOff][0] +
         s->atoms->p[iOff][1] * s->atoms->p[iOff][1] +
         s->atoms->p[iOff][2] * s->atoms->p[iOff][2] )*invMass;
      }
   }

   eLocal[1] = kenergy;

   real_t eSum[2];
   startTimer(commReduceTimer);
   addRealParallel(eLocal, eSum, 2);
   stopTimer(commReduceTimer);

   s->ePotential = eSum[0];
   s->eKinetic = eSum[1];
}

/// \details
/// This function provides one-stop shopping for the sequence of events
/// that must occur for a proper exchange of halo atoms after the atom
/// positions have been updated by the integrator.
///
/// - updateLinkCells: Since atoms have moved, some may be in the wrong
///   link cells.
/// - haloExchange (atom version): Sends atom data to remote tasks.
/// - sort: Sort the atoms.
///
/// \see updateLinkCells
/// \see initAtomHaloExchange
/// \see sortAtomsInCell
void redistributeAtoms(SimFlat* sim)
{
   updateLinkCells(sim->boxes, sim->atoms);

   startTimer(atomHaloTimer);
   haloExchange(sim->atomExchange, sim);
   stopTimer(atomHaloTimer);

   // Sorting covers all boxes (local + halo); cells are independent,
   // so the loop parallelizes safely.
   #pragma omp parallel for
   for (int ii=0; ii<sim->boxes->nTotalBoxes; ++ii)
      sortAtomsInCell(sim->atoms, sim->boxes, ii);
}
atomic-1.c
/* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */
/* { dg-options "-O2 -fopenmp -march=pentium" { target i?86-*-* x86_64-*-* } } */
/* { dg-options "-O2 -fopenmp" { target lp64 } } */

/* Testsuite driver: exercises `#pragma omp atomic` on a plain double
   and on a double embedded in a struct, checking that the compound
   updates (+=, *=, /=) produce the expected value and that the atomic
   stores do not clobber adjacent struct fields.  */

#ifdef __i386__
#include "../../../gcc/testsuite/gcc.dg/i386-cpuid.h"
/* CPUID feature bit for cmpxchg8b, needed for 8-byte atomic updates
   on 32-bit x86; the test is skipped below when it is absent.  */
#define bit_CX8 (1 << 8)
#endif

extern void abort (void);
double d;
/* e is flanked by i and j so a too-wide atomic store on e would be
   detected by the field checks in main.  */
struct { int i; double e; int j; } x;

/* Apply the three atomic compound ops to the global double:
   starting from 1.0: 1.0 + 7.5 = 8.5; 8.5 * 2.5 = 21.25;
   21.25 / 0.25 = 85.0.  */
void
f1 (void)
{
#pragma omp atomic
  d += 7.5;
#pragma omp atomic
  d *= 2.5;
#pragma omp atomic
  d /= 0.25;
}

/* Same sequence of atomic updates, but on a struct member.  */
void
f2 (void)
{
#pragma omp atomic
  x.e += 7.5;
#pragma omp atomic
  x.e *= 2.5;
#pragma omp atomic
  x.e /= 0.25;
}

int
main (void)
{
#ifdef __i386__
  unsigned long cpu_facilities;

  cpu_facilities = i386_cpuid ();

  if ((cpu_facilities & bit_CX8) == 0)
    return 0;		/* cmpxchg8b unavailable: skip, don't fail.  */
#endif

  d = 1.0;
  f1 ();
  if (d != 85.0)
    abort ();
  x.e = 1.0;
  f2 ();
  /* i and j must still be zero-initialized: the atomic on e may not
     touch the neighboring fields.  */
  if (x.i != 0 || x.e != 85.0 || x.j != 0)
    abort ();
  return 0;
}
QED_AEG.h
#pragma once
#include "Constants.h"
#include "Ensemble.h"
#include "Grid.h"
#include "Pusher.h"
#include "synchrotron.h"

// NOTE(review): <omp.h> is included unconditionally even though OpenMP use is
// gated on __USE_OMP__ below -- confirm non-OpenMP builds still see the header.
#include <omp.h>
#include <random>

using namespace constants;

namespace pfc {

    // Scalar (non-vectorized) QED event-generator particle pusher: advances
    // electrons/positrons with a Boris push and photons ballistically, and
    // stochastically generates photon emission and (optionally) pair creation
    // events.  "AEG" presumably names the adaptive event-generator scheme
    // used here -- TODO confirm against project documentation.
    class ScalarQED_AEG_only_electron : public ParticlePusher
    {
    public:
        // Sets up rate thresholds, physical prefactors and per-thread scratch
        // buffers.  Pair production is disabled by default
        // (coeffPair_probability == 0); see the gating in processParticles().
        ScalarQED_AEG_only_electron()
        {
            // Per-step event-probability window: below MinProbability events
            // are handled by rejection + reweighting, above MaxProbability the
            // step is sub-divided into an avalanche (see the handlers below).
            MinProbability = 5e-4;
            MaxProbability = 0.01;

            // E_S = (m c)^2 c / (|e| hbar); electronCharge() is negative,
            // hence the leading minus sign.
            SchwingerField = sqr(Constants<FP>::electronMass() * Constants<FP>::lightVelocity())
                * Constants<FP>::lightVelocity()
                / (-Constants<FP>::electronCharge() * Constants<FP>::planck());

            // Overall rate prefactor: e^2 m c / hbar^2.
            preFactor = sqr(Constants<FP>::electronCharge()) * Constants<FP>::electronMass()
                * Constants<FP>::lightVelocity() / sqr(Constants<FP>::planck());

            coeffPhoton_probability = 1.0;
            coeffPair_probability = 0.0;

            distribution = std::uniform_real_distribution<FP>(0.0, 1.0);

            int max_threads;
#ifdef __USE_OMP__
            max_threads = omp_get_max_threads();
#else
            max_threads = 1;
#endif
            // One scratch buffer per OpenMP thread so the parallel handlers
            // never share a container.
            AvalanchePhotons.resize(max_threads);
            AvalancheParticles.resize(max_threads);
            afterAvalanchePhotons.resize(max_threads);
            afterAvalancheParticles.resize(max_threads);
        }

        // Runs N consecutive time steps, advancing the grid's notion of time
        // before each step.
        void processParticlesNIter(Ensemble3d* particles, pyYeeField* grid, FP timeStep,
            FP startTime, int N)
        {
            for (int i = 0; i < N; i++)
            {
                grid->setTime(startTime + i * timeStep);
                processParticles(particles, grid, timeStep);
            }
        }

        // One full time step over the ensemble: clears the per-thread scratch
        // buffers, processes each species (photons only when pair production
        // is enabled, charged species only when photon emission is enabled),
        // then serially appends all newly created particles to the ensemble.
        void processParticles(Ensemble3d* particles, pyYeeField* pyGrid, FP timeStep)
        {
            int max_threads;
#ifdef __USE_OMP__
            max_threads = omp_get_max_threads();
#else
            max_threads = 1;
#endif
            for (int th = 0; th < max_threads; th++)
            {
                AvalanchePhotons[th].clear();
                AvalancheParticles[th].clear();
                afterAvalanchePhotons[th].clear();
                afterAvalancheParticles[th].clear();
            }

            if ((*particles)[Photon].size() && coeffPair_probability != 0)
                HandlePhotons((*particles)[Photon], pyGrid->getGrid(), timeStep);
            if ((*particles)[Electron].size() && coeffPhoton_probability != 0)
                HandleParticles((*particles)[Electron], pyGrid->getGrid(), timeStep);
            if ((*particles)[Positron].size() && coeffPhoton_probability != 0)
                HandleParticles((*particles)[Positron], pyGrid->getGrid(), timeStep);

            // Serial append: Ensemble3d::addParticle is not called from the
            // parallel regions above.
            for (int th = 0; th < max_threads; th++)
            {
                for (int ind = 0; ind < afterAvalanchePhotons[th].size(); ind++)
                {
                    particles->addParticle(afterAvalanchePhotons[th][ind]);
                }
                for (int ind = 0; ind < afterAvalancheParticles[th].size(); ind++)
                {
                    particles->addParticle(afterAvalancheParticles[th][ind]);
                }
            }
        }

        // Standard Boris push (half electric kick, magnetic rotation, half
        // kick) followed by a position drift.  The rvalue-reference parameter
        // lets call sites pass temporary element/proxy objects -- presumably
        // the container operator[] returns such temporaries; TODO confirm.
        void Boris(Particle3d&& particle, const FP3& e, const FP3& b, FP timeStep)
        {
            FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass() * Constants<FP>::lightVelocity());
            FP3 eMomentum = e * eCoeff;
            FP3 um = particle.getP() + eMomentum;
            FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
            FP3 uprime = um + cross(um, t);
            FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
            particle.setP(eMomentum + um + cross(uprime, s));
            particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
        }

        // Proxy-object overload of the Boris push; identical algorithm.
        void Boris(ParticleProxy3d&& particle, const FP3& e, const FP3& b, FP timeStep)
        {
            FP eCoeff = timeStep * particle.getCharge() / (2 * particle.getMass() * Constants<FP>::lightVelocity());
            FP3 eMomentum = e * eCoeff;
            FP3 um = particle.getP() + eMomentum;
            FP3 t = b * eCoeff / sqrt((FP)1 + um.norm2());
            FP3 uprime = um + cross(um, t);
            FP3 s = t * (FP)2 / ((FP)1 + t.norm2());
            particle.setP(eMomentum + um + cross(uprime, s));
            particle.setPosition(particle.getPosition() + timeStep * particle.getVelocity());
        }

        // Advances photons ballistically and decides, per photon, between
        // (a) rejection/reweighting, (b) a single pair-creation trial, or
        // (c) a fully resolved avalanche with sub-stepping.
        void HandlePhotons(ParticleArray3d& particles, YeeGrid* grid, FP timeStep)
        {
            FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
            for (int i = 0; i < particles.size(); i++)
            {
                int thread_id;
#ifdef __USE_OMP__
                thread_id = omp_get_thread_num();
#else
                thread_id = 0;
#endif
                FP3 pPos = particles[i].getPosition();
                FP3 k = particles[i].getVelocity();
                FP3 e, b;
                e = grid->getE(pPos);
                b = grid->getB(pPos);
                k = (1 / k.norm()) * k; // normalized wave vector
                // Ballistic advance at c along the propagation direction.
                particles[i].setPosition(pPos + dt * Constants<FP>::lightVelocity() * k);
                // Effective field seen by the photon, and its ratio to E_S.
                FP H_eff = sqrt(sqr(e + VP(k, b)) - sqr(SP(e, k)));
                FP HE = H_eff / SchwingerField;
                // Photon "gamma": momentum in units of m c.
                FP pGamma = particles[i].getMomentum().norm() / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
                FP EstimatedProbability = dt * estimatedPhotons(HE, pGamma);
                FP Factor = 1;
                if (EstimatedProbability < MinProbability)
                {
                    // Rare-event path: keep only a fraction of candidates and
                    // boost the surviving ones' event weight accordingly.
                    FP r0 = random_number_omp();
                    if (r0 > EstimatedProbability / MinProbability)
                        continue;
                    else
                        Factor = MinProbability / EstimatedProbability;
                }
                if (EstimatedProbability < MaxProbability)
                { //=======handle single event========
                    double gamma = pGamma;
                    double chi = gamma * H_eff / SchwingerField;
                    // delta = fraction of the photon momentum given to the electron.
                    double delta = Pair_Generator(Factor, chi, gamma, dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Electron);
                        NewParticle.setWeight(particles[i].getWeight());
                        NewParticle.setPosition(particles[i].getPosition());
                        NewParticle.setMomentum(delta * particles[i].getMomentum());
                        afterAvalancheParticles[thread_id].push_back(NewParticle);
                        NewParticle.setType(Positron);
                        NewParticle.setMomentum((1 - delta) * particles[i].getMomentum());
                        afterAvalancheParticles[thread_id].push_back(NewParticle);
                        //deletePhoton
                        // NOTE(review): the converted photon is not removed
                        // from the ensemble here -- confirm intended.
                    }
                }
                else
                { //=======handle avalanche========
                    AvalancheParticles[thread_id].clear();
                    AvalanchePhotons[thread_id].clear();
                    AvalanchePhotons[thread_id].push_back(particles[i]);
                    // Undo the ballistic advance: the avalanche sub-stepper
                    // re-integrates the trajectory itself.
                    particles[i].setPosition(particles[i].getPosition() - dt * Constants<FP>::lightVelocity() * k); // go back
                    RunAvalanche(H_eff, e, b, Photon, pGamma, dt);
                    //deletePhoton
                    // (loop index k below shadows the wave vector k above)
                    for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
                        afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
                    for (int k = 0; k != AvalancheParticles[thread_id].size(); k++)
                        afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
                }
            }
        }

        // Advances charged particles (electrons/positrons) with the Boris
        // push and decides between rejection/reweighting, a single photon
        // emission trial, or a fully resolved avalanche.
        void HandleParticles(ParticleArray3d& particles, YeeGrid* grid, FP timeStep)
        {
            FP dt = timeStep;
#pragma omp parallel for schedule(dynamic, 1)
            for (int i = 0; i < particles.size(); i++)
            {
                int thread_id;
#ifdef __USE_OMP__
                thread_id = omp_get_thread_num();
#else
                thread_id = 0;
#endif
                FP3 pPos = particles[i].getPosition();
                FP3 v = particles[i].getVelocity();
                FP3 e, b;
                e = grid->getE(pPos);
                b = grid->getB(pPos);
                // Effective field squared; clamped at zero before the sqrt to
                // guard against round-off making it slightly negative.
                FP H_eff = sqr(e + (1 / Constants<FP>::lightVelocity()) * VP(v, b))
                    - sqr(SP(e, v) / Constants<FP>::lightVelocity());
                if (H_eff < 0) H_eff = 0;
                H_eff = sqrt(H_eff);
                FP pGamma = particles[i].getGamma();
                FP HE = H_eff / SchwingerField;
                FP EstimatedProbability = dt * estimatedParticles(HE, pGamma);
                FP Factor = 1;
                if (EstimatedProbability < MinProbability)
                {
                    // Rare-event path: usually just push the particle; a small
                    // fraction proceeds with boosted event weight.
                    FP r0 = random_number_omp();
                    if (r0 > EstimatedProbability / MinProbability)
                    {
                        Boris(particles[i], e, b, dt);
                        continue;
                    }
                    else
                        Factor = MinProbability / EstimatedProbability;
                }
                if (EstimatedProbability < MaxProbability)
                { //=======handle single event========
                    double gamma = pGamma;
                    double chi = gamma * H_eff / SchwingerField;
                    // delta = fraction of the particle momentum carried away
                    // by the emitted photon.
                    double delta = Photon_MGenerator(Factor, chi, gamma, dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Photon);
                        NewParticle.setWeight(particles[i].getWeight());
                        NewParticle.setPosition(particles[i].getPosition());
                        NewParticle.setMomentum(delta * particles[i].getMomentum());
                        afterAvalanchePhotons[thread_id].push_back(NewParticle);
                        // Recoil: the emitter keeps the remaining momentum.
                        particles[i].setMomentum((1 - delta) * particles[i].getMomentum());
                    }
                    Boris(particles[i], e, b, dt);
                }
                else
                { //=======handle avalanche========
                    AvalancheParticles[thread_id].clear();
                    AvalanchePhotons[thread_id].clear();
                    AvalancheParticles[thread_id].push_back(particles[i]);
                    RunAvalanche(H_eff, e, b, particles[i].getType(), pGamma, dt);
                    for (int k = 0; k != AvalanchePhotons[thread_id].size(); k++)
                        afterAvalanchePhotons[thread_id].push_back(AvalanchePhotons[thread_id][k]);
                    // Slot 0 of the avalanche buffer is the seed particle
                    // itself: copy its final state back in place; any extra
                    // particles created go to the append buffer.
                    particles[i].setMomentum(AvalancheParticles[thread_id][0].getMomentum());
                    particles[i].setPosition(AvalancheParticles[thread_id][0].getPosition());
                    for (int k = 1; k != AvalancheParticles[thread_id].size(); k++)
                        afterAvalancheParticles[thread_id].push_back(AvalancheParticles[thread_id][k]);
                }
            }
        }

        // Resolves a QED cascade over one time step by sub-dividing it so
        // that the per-sub-step event probability stays at MaxProbability.
        // Assumes constant fields E and B over the step.  Operates on this
        // thread's AvalancheParticles / AvalanchePhotons scratch buffers.
        void RunAvalanche(double H_eff_global, const FP3& E, const FP3& B, int SeedType,
            double gamma, double dt)
        {
            int thread_id;
#ifdef __USE_OMP__
            thread_id = omp_get_thread_num();
#else
            thread_id = 0;
#endif
            vector<Particle3d>& AvalancheParticles = this->AvalancheParticles[thread_id];
            vector<Particle3d>& AvalanchePhotons = this->AvalanchePhotons[thread_id];

            gamma = max(gamma, 1.0);
            FP HE = H_eff_global / SchwingerField;
            // Choose the sub-step from the charged-particle rate estimate,
            // then round up to an integer number of sub-steps.
            FP sub_dt = MaxProbability / estimatedParticles(HE, gamma);
            int NT = 1 + int(dt / sub_dt);
            sub_dt = dt / FP(NT);

            for (int i = 0; i != NT; i++)
            {
                // Charged particles: push, then attempt photon emission.
                for (int k = 0; k != AvalancheParticles.size(); k++)
                {
                    Boris(AvalancheParticles[k], E, B, sub_dt);
                    FP3 v = AvalancheParticles[k].getVelocity();
                    FP H_eff = sqr(E + (1 / Constants<FP>::lightVelocity()) * VP(v, B))
                        - sqr(SP(E, v) / Constants<FP>::lightVelocity());
                    if (H_eff < 0) H_eff = 0;
                    H_eff = sqrt(H_eff);
                    FP gamma = AvalancheParticles[k].getGamma();
                    FP chi = gamma * H_eff / SchwingerField;
                    FP delta = Photon_MGenerator(1, chi, gamma, sub_dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Photon);
                        NewParticle.setWeight(AvalancheParticles[k].getWeight());
                        NewParticle.setPosition(AvalancheParticles[k].getPosition());
                        NewParticle.setMomentum(delta * AvalancheParticles[k].getMomentum());
                        AvalanchePhotons.push_back(NewParticle);
                        AvalancheParticles[k].setMomentum((1 - delta) * AvalancheParticles[k].getMomentum());
                    }
                }
                // Photons: drift, then attempt pair creation.  A converted
                // photon is removed by swap-with-last + pop_back, with k--
                // so the swapped-in photon is also processed.
                for (int k = 0; k < AvalanchePhotons.size(); k++)
                {
                    FP3 k_ = AvalanchePhotons[k].getVelocity();
                    k_ = (1 / k_.norm()) * k_; // normalized wave vector
                    AvalanchePhotons[k].setPosition(AvalanchePhotons[k].getPosition()
                        + sub_dt * Constants<FP>::lightVelocity() * k_);
                    FP H_eff = sqrt(sqr(E + VP(k_, B)) - sqr(SP(E, k_)));
                    FP gamma = AvalanchePhotons[k].getMomentum().norm()
                        / (Constants<FP>::electronMass() * Constants<FP>::lightVelocity());
                    FP chi = gamma * H_eff / SchwingerField;
                    FP delta = Pair_Generator(1, chi, gamma, sub_dt);
                    if (delta != 0)
                    {
                        Particle3d NewParticle;
                        NewParticle.setType(Electron);
                        NewParticle.setWeight(AvalanchePhotons[k].getWeight());
                        NewParticle.setPosition(AvalanchePhotons[k].getPosition());
                        NewParticle.setMomentum(delta * AvalanchePhotons[k].getMomentum());
                        AvalancheParticles.push_back(NewParticle);
                        NewParticle.setType(Positron);
                        NewParticle.setMomentum((1 - delta) * AvalanchePhotons[k].getMomentum());
                        AvalancheParticles.push_back(NewParticle);
                        AvalanchePhotons[k] = AvalanchePhotons[AvalanchePhotons.size() - 1];
                        AvalanchePhotons.pop_back();
                        k--;
                    }
                }
            }
        }

        // Upper estimate of the pair-creation event rate for photons;
        // gamma is accepted but unused in this estimate.
        FP estimatedPhotons(FP HE, FP gamma)
        {
            return (0.0827 * HE) * preFactor;
        }

        // Upper estimate of the photon-emission event rate for charged
        // particles; piecewise fit in b = (3/2) * HE * gamma.  The fitted
        // coefficients are empirical -- source of the fit not visible here.
        FP estimatedParticles(FP HE, FP gamma)
        {
            FP b = 3.0 / 2.0 * HE * gamma;
            FP newFactor;
            if (b < 0.1)
            {
                newFactor = 0.962436 * b / gamma + 0.0827 * HE;
            }
            else if (b < 0.5)
            {
                newFactor = 0.779009 * pow(b, 11.0 / 12.0) / gamma + 0.0827 * HE;
            }
            else if (b < 10)
            {
                newFactor = 0.721193 * pow(b, 19.0 / 24.0) / gamma + 0.0827 * HE;
            }
            else
            {
                newFactor = 0.955556 * pow(b, 2.0 / 3.0) / gamma + 0.0827 * HE;
            }
            return newFactor * preFactor;
        }

        // Differential photon-emission probability density at energy fraction
        // d, built from the synchrotron functions; returns 0 outside the
        // numerically safe argument range (0, 700).
        FP Photon_probability(FP chi, FP gamma, FP d)
        {
            FP z = (2 / 3.0) * (1 / chi) * d / (1 - d);
            FP coeff = (sqrt(3.0) / (2.0 * pi)) * coeffPhoton_probability;
            if ((z < 700) && (z > 0))
                return coeff * (chi / gamma) * ((1 - d) / d)
                    * (synchrotron_1(z) + (3 / 2.0) * d * chi * z * synchrotron_2(z));
            else return 0;
        }

        // Differential pair-creation probability density at electron energy
        // fraction d; same argument-range guard as Photon_probability.
        FP Pair_probability(FP chi, FP gamma, FP d)
        {
            FP z_p = (2 / 3.0) / (chi * (1 - d) * d);
            FP coeff = (sqrt(3.0) / (2.0 * pi)) * coeffPair_probability;
            if ((z_p < 700) && (z_p > 0))
                return coeff * (chi / gamma) * (d - 1) * d
                    * (synchrotron_1(z_p) - (3 / 2.0) * chi * z_p * synchrotron_2(z_p));
            else return 0;
        }

        // Single rejection-sampling trial for pair creation: draws a
        // candidate energy fraction r1 uniformly and accepts it with
        // probability Factor * dt * preFactor * Pair_probability(...).
        FP Pair_Generator(FP Factor, FP chi, FP gamma, FP dt) //returns photon energy in mc2gamma in case of generation.
        {
            FP factor = Factor * dt * preFactor;
            FP r1 = random_number_omp();
            FP r2 = random_number_omp();
            if (r2 < factor * Pair_probability(chi, gamma, r1))
                return r1;
            else return 0;
        }

        // Single rejection-sampling trial for photon emission.  The candidate
        // energy fraction is sampled as r1 = r0^3 (biasing toward small
        // fractions) with the 3*r0^2 Jacobian compensating in the acceptance
        // test, so the accepted distribution is still Photon_probability.
        FP Photon_MGenerator(FP Factor, FP chi, FP gamma, FP dt) //Modified event generator: returns photon energy in mc2gamma in case of generation, !doesn't change gamma
        {
            double r0 = random_number_omp();
            double r1 = r0 * r0 * r0;
            double r2 = random_number_omp();
            double factor = Factor * dt * preFactor;
            if (r2 < factor * Photon_probability(chi, gamma, r1) * 3 * r0 * r0)
                return r1;
            else return 0;
        }

        // ParticlePusher interface.  NOTE(review): the proxy overload is a
        // no-op and the Particle3d overload forwards to it, so per-particle
        // operator() currently does nothing -- confirm this is intended (all
        // real work goes through processParticles).
        void operator()(ParticleProxy3d* particle, ValueField field, FP timeStep) {}
        void operator()(Particle3d* particle, ValueField field, FP timeStep)
        {
            ParticleProxy3d particleProxy(*particle);
            this->operator()(&particleProxy, field, timeStep);
        }

    private:
        // Uniform random draw in [0, 1).  The critical section (which covers
        // only the single following statement) serializes all threads through
        // the one shared engine -- correct but a contention point.
        FP random_number_omp()
        {
            FP rand_n;
#pragma omp critical
            rand_n = distribution(rand_generator);
            return rand_n;
        }

        FP MinProbability, MaxProbability;   // per-step event-probability window
        FP SchwingerField;                   // E_S, see constructor
        FP preFactor;                        // e^2 m c / hbar^2, see constructor
        FP coeffPhoton_probability, coeffPair_probability;  // per-process enable/scale factors
        std::default_random_engine rand_generator;
        std::uniform_real_distribution<FP> distribution;
        // Per-thread scratch: cascade working sets and append buffers.
        vector<vector<Particle3d>> AvalanchePhotons, AvalancheParticles;
        vector<vector<Particle3d>> afterAvalanchePhotons, afterAvalancheParticles;
    };
}
parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// Diagnostic test for '#pragma omp parallel for simd' clause parsing and
// semantic checks.  Each annotation of the form shown below, at offset +N,
// must match the diagnostic the compiler emits N lines further down; do not
// insert lines between an annotation and its target.

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd foo

void test_no_clause() {
  int i;
#pragma omp parallel for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}}
#pragma omp parallel for simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

void test_safelen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_simdlen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_safelen_simdlen() {
  int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp parallel for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp parallel for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}

void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
#pragma omp parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd collapse(2)
  for (i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp parallel for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_linear() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd linear(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp parallel for simd linear(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp parallel for simd private(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp parallel for simd linear(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp parallel for simd linear(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp parallel for simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp parallel for simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
}

void test_aligned() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp parallel for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp parallel for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages() {
  float a[100], b[100],
c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp parallel for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp 
parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp parallel for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp parallel for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP 
clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp parallel for simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; }
utils.c
// Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. // Copyright 2015. UChicago Argonne, LLC. This software was produced // under U.S. Government contract DE-AC02-06CH11357 for Argonne National // Laboratory (ANL), which is operated by UChicago Argonne, LLC for the // U.S. Department of Energy. The U.S. Government has rights to use, // reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR // UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR // ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is // modified to produce derivative works, such modified software should // be clearly marked, so as not to confuse it with the version available // from ANL. // Additionally, redistribution and use in source and binary forms, with // or without modification, are permitted provided that the following // conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the // distribution. // * Neither the name of UChicago Argonne, LLC, Argonne National // Laboratory, ANL, the U.S. Government, nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL UChicago // Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <float.h> #include <stdint.h> #include "utils.h" // for windows build #ifdef WIN32 # ifdef PY3K void PyInit_libtomopy(void) { } # else void initlibtomopy(void) { } # endif #endif //======================================================================================// void preprocessing(int ry, int rz, int num_pixels, float center, float* mov, float* gridx, float* gridy) { for(int i = 0; i <= ry; ++i) { gridx[i] = -ry * 0.5f + i; } for(int i = 0; i <= rz; ++i) { gridy[i] = -rz * 0.5f + i; } *mov = ((float) num_pixels - 1) * 0.5f - center; if(*mov - floor(*mov) < 0.01f) { *mov += 0.01f; } *mov += 0.5; } //======================================================================================// int calc_quadrant(float theta_p) { // here we cast the float to an integer and rescale the integer to // near INT_MAX to retain the precision. This method was tested // on 1M random random floating points between -2*pi and 2*pi and // was found to produce a speed up of: // // - 14.5x (Intel i7 MacBook) // - 2.2x (NERSC KNL) // - 1.5x (NERSC Edison) // - 1.7x (NERSC Haswell) // // with a 0.0% incorrect quadrant determination rate // const int32_t ipi_c = 340870420; int32_t theta_i = (int32_t)(theta_p * ipi_c); theta_i += (theta_i < 0) ? (2.0f * M_PI * ipi_c) : 0; return ((theta_i >= 0 && theta_i < 0.5f * M_PI * ipi_c) || (theta_i >= 1.0f * M_PI * ipi_c && theta_i < 1.5f * M_PI * ipi_c)) ? 
1 : 0; } //======================================================================================// void calc_coords(int ry, int rz, float xi, float yi, float sin_p, float cos_p, const float* gridx, const float* gridy, float* coordx, float* coordy) { float srcx = xi * cos_p - yi * sin_p; float srcy = xi * sin_p + yi * cos_p; float detx = -xi * cos_p - yi * sin_p; float dety = -xi * sin_p + yi * cos_p; float slope = (srcy - dety) / (srcx - detx); float islope = (srcx - detx) / (srcy - dety); #pragma omp simd for(int n = 0; n <= rz; ++n) { coordx[n] = islope * (gridy[n] - srcy) + srcx; } #pragma omp simd for(int n = 0; n <= ry; ++n) { coordy[n] = slope * (gridx[n] - srcx) + srcy; } } //======================================================================================// void trim_coords(int ry, int rz, const float* coordx, const float* coordy, const float* gridx, const float* gridy, int* asize, float* ax, float* ay, int* bsize, float* bx, float* by) { *asize = 0; *bsize = 0; float gridx_gt = gridx[0] + 0.01f; float gridx_le = gridx[ry] - 0.01f; for(int n = 0; n <= rz; ++n) { if(coordx[n] >= gridx_gt && coordx[n] <= gridx_le) { ax[*asize] = coordx[n]; ay[*asize] = gridy[n]; ++(*asize); } } float gridy_gt = gridy[0] + 0.01f; float gridy_le = gridy[rz] - 0.01f; for(int n = 0; n <= ry; ++n) { if(coordy[n] >= gridy_gt && coordy[n] <= gridy_le) { bx[*bsize] = gridx[n]; by[*bsize] = coordy[n]; ++(*bsize); } } } //======================================================================================// void sort_intersections(int ind_condition, int asize, const float* ax, const float* ay, int bsize, const float* bx, const float* by, int* csize, float* coorx, float* coory) { int i = 0, j = 0, k = 0; if(ind_condition == 0) { while(i < asize && j < bsize) { if(ax[asize - 1 - i] < bx[j]) { coorx[k] = ax[asize - 1 - i]; coory[k] = ay[asize - 1 - i]; ++i; } else { coorx[k] = bx[j]; coory[k] = by[j]; ++j; } ++k; } while(i < asize) { coorx[k] = ax[asize - 1 - i]; coory[k] = 
ay[asize - 1 - i]; ++i; ++k; } while(j < bsize) { coorx[k] = bx[j]; coory[k] = by[j]; ++j; ++k; } (*csize) = asize + bsize; } else { while(i < asize && j < bsize) { if(ax[i] < bx[j]) { coorx[k] = ax[i]; coory[k] = ay[i]; ++i; } else { coorx[k] = bx[j]; coory[k] = by[j]; ++j; } ++k; } while(i < asize) { coorx[k] = ax[i]; coory[k] = ay[i]; ++i; ++k; } while(j < bsize) { coorx[k] = bx[j]; coory[k] = by[j]; ++j; ++k; } (*csize) = asize + bsize; } } //======================================================================================// void calc_dist(int ry, int rz, int csize, const float* coorx, const float* coory, int* indi, float* dist) { if(csize < 2) return; const int _size = csize - 1; //------------------------------------------------------------------------// // calculate dist //------------------------------------------------------------------------// { float* _diffx = malloc(_size * sizeof(float)); float* _diffy = malloc(_size * sizeof(float)); #pragma omp simd for(int n = 0; n < _size; ++n) { _diffx[n] = (coorx[n + 1] - coorx[n]) * (coorx[n + 1] - coorx[n]); } #pragma omp simd for(int n = 0; n < _size; ++n) { _diffy[n] = (coory[n + 1] - coory[n]) * (coory[n + 1] - coory[n]); } #pragma omp simd for(int n = 0; n < _size; ++n) { dist[n] = sqrtf(_diffx[n] + _diffy[n]); } free(_diffx); free(_diffy); } //------------------------------------------------------------------------// // calculate indi //------------------------------------------------------------------------// int* _indx = malloc(_size * sizeof(int)); int* _indy = malloc(_size * sizeof(int)); #pragma omp simd for(int n = 0; n < _size; ++n) { float _midx = 0.5f * (coorx[n + 1] + coorx[n]); float _x1 = _midx + 0.5f * ry; float _i1 = (int) (_midx + 0.5f * ry); _indx[n] = _i1 - (_i1 > _x1); } #pragma omp simd for(int n = 0; n < _size; ++n) { float _midy = 0.5f * (coory[n + 1] + coory[n]); float _x2 = _midy + 0.5f * rz; float _i2 = (int) (_midy + 0.5f * rz); _indy[n] = _i2 - (_i2 > _x2); } #pragma omp simd 
for(int n = 0; n < _size; ++n) { indi[n] = _indy[n] + (_indx[n] * rz); } free(_indx); free(_indy); } //======================================================================================// void calc_dist2(int ry, int rz, int csize, const float* coorx, const float* coory, int* indx, int* indy, float* dist) { #pragma omp simd for(int n = 0; n < csize - 1; ++n) { float diffx = coorx[n + 1] - coorx[n]; float diffy = coory[n + 1] - coory[n]; dist[n] = sqrt(diffx * diffx + diffy * diffy); } #pragma omp simd for(int n = 0; n < csize - 1; ++n) { float midx = (coorx[n + 1] + coorx[n]) * 0.5f; float midy = (coory[n + 1] + coory[n]) * 0.5f; float x1 = midx + ry * 0.5f; float x2 = midy + rz * 0.5f; int i1 = (int) (midx + ry * 0.5f); int i2 = (int) (midy + rz * 0.5f); indx[n] = i1 - (i1 > x1); indy[n] = i2 - (i2 > x2); } } //======================================================================================// void calc_simdata(int s, int p, int d, int ry, int rz, int dt, int dx, int csize, const int* indi, const float* dist, const float* model, float* simdata) { int index_model = s * ry * rz; int index_data = d + p * dx + s * dt * dx; for(int n = 0; n < csize - 1; ++n) { simdata[index_data] += model[indi[n] + index_model] * dist[n]; } } //======================================================================================// void calc_simdata2(int s, int p, int d, int ry, int rz, int dt, int dx, int csize, const int* indx, const int* indy, const float* dist, float vx, float vy, const float* modelx, const float* modely, float* simdata) { int n; for(n = 0; n < csize - 1; n++) { simdata[d + p * dx + s * dt * dx] += (modelx[indy[n] + indx[n] * rz + s * ry * rz] * vx + modely[indy[n] + indx[n] * rz + s * ry * rz] * vy) * dist[n]; } } //======================================================================================// void calc_simdata3(int s, int p, int d, int ry, int rz, int dt, int dx, int csize, const int* indx, const int* indy, const float* dist, float vx, float vy, 
const float* modelx, const float* modely, const float* modelz, int axis, float* simdata) { int n; if(axis == 0) { for(n = 0; n < csize - 1; n++) { simdata[d + p * dx + s * dt * dx] += (modelx[indy[n] + indx[n] * rz + s * ry * rz] * vx + modely[indy[n] + indx[n] * rz + s * ry * rz] * vy) * dist[n]; } } else if(axis == 1) { for(n = 0; n < csize - 1; n++) { simdata[d + p * dx + s * dt * dx] += (modely[s + indx[n] * rz + indy[n] * ry * rz] * vx + modelz[s + indx[n] * rz + indy[n] * ry * rz] * vy) * dist[n]; } } else if(axis == 2) { for(n = 0; n < csize - 1; n++) { simdata[d + p * dx + s * dt * dx] += (modelx[indx[n] + s * rz + indy[n] * ry * rz] * vx + modelz[indx[n] + s * rz + indy[n] * ry * rz] * vy) * dist[n]; } } } //======================================================================================//
taskbench.c
/****************************************************************************
*                                                                           *
*             OpenMP MicroBenchmark Suite - Version 3.0                     *
*                                                                           *
*                            produced by                                    *
*                                                                           *
*                Mark Bull, Fiona Reid and Nix Mc Donnell                   *
*                                                                           *
*                                at                                         *
*                                                                           *
*                Edinburgh Parallel Computing Centre                        *
*                                                                           *
*         email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk                 *
*                                                                           *
*                                                                           *
*      This version copyright (c) The University of Edinburgh, 2011.        *
*                                                                           *
*                                                                           *
*  Licensed under the Apache License, Version 2.0 (the "License");          *
*  you may not use this file except in compliance with the License.         *
*  You may obtain a copy of the License at                                  *
*                                                                           *
*      http://www.apache.org/licenses/LICENSE-2.0                           *
*                                                                           *
*  Unless required by applicable law or agreed to in writing, software      *
*  distributed under the License is distributed on an "AS IS" BASIS,        *
*  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
*  See the License for the specific language governing permissions and      *
*  limitations under the License.                                           *
*                                                                           *
****************************************************************************/

/* Benchmarks for the overhead of the OpenMP 3.0 tasking constructs.
 * NOTE: this is measurement code — the exact shape of each loop/pragma below
 * IS the quantity being timed, so the bodies are kept verbatim. */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#include "common.h"
#include "taskbench.h"

/* Depth of the task trees used by the BRANCH/LEAF benchmarks (2^DEPTH leaves). */
#define DEPTH 6

int main(int argc, char **argv) {
    init(argc, argv);

#ifdef OMPVER3
    /* GENERATE REFERENCE TIME */
    reference("reference time 1", &refer);

    /* TEST PARALLEL TASK GENERATION */
    benchmark("PARALLEL TASK", &testParallelTaskGeneration);

    /* TEST MASTER TASK GENERATION */
    benchmark("MASTER TASK", &testMasterTaskGeneration);

    /* TEST MASTER TASK GENERATION WITH BUSY SLAVES */
    benchmark("MASTER TASK BUSY SLAVES", &testMasterTaskGenerationWithBusySlaves);

    /* TEST CONDITIONAL TASK GENERATION */
#ifndef DISABLE_CONDITIONAL_TASK_TEST
    benchmark("CONDITIONAL TASK", &testConditionalTaskGeneration);
#endif // DISABLE_CONDITIONAL_TASK_TEST

    /* TEST TASK WAIT */
    benchmark("TASK WAIT", &testTaskWait);

    /* TEST TASK BARRIER */
#ifndef DISABLE_BARRIER_TEST
    benchmark("TASK BARRIER", &testTaskBarrier);
#endif //DISABLE_BARRIER_TEST

#ifndef DISABLE_NESTED_TASKS_TESTS
    /* TEST NESTED TASK GENERATION */
    benchmark("NESTED TASK", &testNestedTaskGeneration);

    /* TEST NESTED MASTER TASK GENERATION */
    benchmark("NESTED MASTER TASK", &testNestedMasterTaskGeneration);
#endif // DISABLE_NESTED_TASKS_TESTS

    /* GENERATE THE SECOND REFERENCE TIME */
    /* NOTE(review): refer2 below matches the (innerreps >> DEPTH) iteration
       pattern of the tree benchmarks but is never used; this call passes
       &refer — confirm against upstream whether &refer2 was intended. */
    reference("reference time 2", &refer);

    /* TEST BRANCH TASK TREE */
    benchmark("BRANCH TASK TREE", &testBranchTaskGeneration);

    /* TEST LEAF TASK TREE */
    benchmark("LEAF TASK TREE", &testLeafTaskGeneration);
#endif // OMPVER3

    finalise();
    return EXIT_SUCCESS;
}

/* Calculate the reference time: innerreps sequential delays, no tasking. */
void refer() {
    int j;
    for (j = 0; j < innerreps; j++) {
        delay(delaylength);
    }
}

/* Calculate the second reference time: the same total work as the tree
   benchmarks, i.e. (innerreps >> DEPTH) * 2^DEPTH delays. */
void refer2() {
    int j;
    for (j = 0; j < (innerreps >> DEPTH) * (1 << DEPTH); j++) {
        delay(delaylength);
    };
}

/* Test parallel task generation overhead: every thread spawns its own tasks. */
void testParallelTaskGeneration() {
    int j;
#pragma omp parallel private( j )
    {
        for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task
            {
                delay( delaylength );
            } // task
        }; // for j
    } // parallel
}

/* Test master task generation overhead: only the master spawns tasks. */
void testMasterTaskGeneration() {
    int j;
#pragma omp parallel private(j)
    {
#pragma omp master
        {
            /* Since this is executed by one thread we need
               innerreps * nthreads iterations */
            for (j = 0; j < innerreps * nthreads; j++) {
#pragma omp task
                {
                    delay(delaylength);
                }
            } /* End for j */
        } /* End master */
    } /* End parallel */
}

/* Test master task generation overhead when the slave threads are busy
   (thread 0 spawns tasks while all other threads execute the delay inline). */
void testMasterTaskGenerationWithBusySlaves() {
    int j;
#pragma omp parallel private( j )
    {
        int thread_num = omp_get_thread_num();
        for (j = 0; j < innerreps; j ++ ) {
            if ( thread_num == 0 ) {
#pragma omp task
                {
                    delay( delaylength );
                } // task
            } else {
                delay( delaylength );
            }; // if
        }; // for j
    } // parallel
}

/* Measure overhead of checking if a task should be spawned.
   The if(returnfalse()) clause makes every task undeferred, so only the
   evaluation/dispatch cost is timed. */
void testConditionalTaskGeneration() {
    int j;
#pragma omp parallel private(j)
    {
        for (j = 0; j < innerreps; j++) {
#pragma omp task if(returnfalse())
            {
                delay( delaylength );
            }
        }
    }
}

#ifndef DISABLE_NESTED_TASKS_TESTS
/* Measure overhead of nested tasks (all threads construct outer tasks).
   Each outer task spawns nthreads untied inner tasks and waits for them. */
void testNestedTaskGeneration() {
    int i,j;
#pragma omp parallel private( i, j )
    {
        for ( j = 0; j < innerreps / nthreads; j ++ ) {
#pragma omp task private( i )
            {
                for ( i = 0; i < nthreads; i ++ ) {
#pragma omp task untied
                    {
                        delay( delaylength );
                    } // task
                }; // for i
                // wait for inner tasks to complete
#pragma omp taskwait
            } // task
        }; // for j
    } // parallel
}

/* Measure overhead of nested tasks (master thread constructs outer tasks). */
void testNestedMasterTaskGeneration() {
    int i, j;
#pragma omp parallel private( i, j )
    {
#pragma omp master
        {
            for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task private( i )
                {
                    for ( i = 0; i < nthreads; i ++ ) {
#pragma omp task
                        {
                            delay( delaylength );
                        } // task
                    }; // for i
                    // wait for inner tasks to complete
#pragma omp taskwait
                } // task
            }; // for j
        } // master
    } // parallel
}
#endif // DISABLE_NESTED_TASKS_TESTS

/* Measure overhead of taskwait (all threads construct tasks, one taskwait
   per task). */
void testTaskWait() {
    int j;
#pragma omp parallel private( j )
    {
        for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task
            {
                delay( delaylength );
            } // task
#pragma omp taskwait
        }; // for j
    } // parallel
}

/* Measure overhead of tasking barrier (all threads construct tasks, one
   barrier per task). */
void testTaskBarrier() {
    int j;
#pragma omp parallel private( j )
    {
        for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task
            {
                delay( delaylength );
            } // task
#pragma omp barrier
        }; // for j
    } // parallel
}

/* Test parallel task generation overhead where work is done at all levels
   of a binary task tree of depth DEPTH. */
void testBranchTaskGeneration() {
    int j;
#pragma omp parallel private(j)
    {
        for (j = 0; j < (innerreps >> DEPTH); j++) {
#pragma omp task
            {
                branchTaskTree(DEPTH);
                delay(delaylength);
            }
        }
    }
}

/* Recursively spawn a binary tree of tasks, delaying at every node. */
void branchTaskTree(int tree_level) {
    if ( tree_level > 0 ) {
#pragma omp task
        {
            branchTaskTree(tree_level - 1);
            branchTaskTree(tree_level - 1);
            delay(delaylength);
        }
    }
}

/* Test parallel task generation overhead where work is done only at the
   leaf level of the task tree. */
void testLeafTaskGeneration() {
    int j;
#pragma omp parallel private(j)
    {
        for (j = 0; j < (innerreps >> DEPTH); j++) {
            leafTaskTree(DEPTH);
        }
    }
}

/* Recursively spawn a binary tree of tasks, delaying only at the leaves. */
void leafTaskTree(int tree_level) {
    if ( tree_level == 0 ) {
        delay(delaylength);
    } else {
#pragma omp task
        {
            leafTaskTree(tree_level - 1);
            leafTaskTree(tree_level - 1);
        }
    }
}
GB_unaryop__ainv_int8_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int8_uint32 // op(A') function: GB_tran__ainv_int8_uint32 // C type: int8_t // A type: uint32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int8_uint32 ( int8_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int8_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
morph_library.h
#ifndef MORPH_LIBRARY_H #define MORPH_LIBRARY_H #include "CImgFloatWrapper.h" #include "loewner_declaration.h" #include "conversions.h" #include "morph_color_matrix.h" #include "morph_circle.h" #include "morph_smallest_circle_mask.h" #include "einstein_operations.h" #include "omp.h" #include <stdio.h> #include <stdlib.h> #include <cmath> #include <errno.h> #include <sys/time.h> #include <string.h> #define open_file(file_ptr, file_name, mode)\ do {\ if (((file_ptr) = fopen((file_name), (mode))) == NULL) {\ fprintf(stderr, "%s:%d: error while opening file %s: %s\n", __FILE__, __LINE__, (file_name), strerror(errno));\ exit(EXIT_FAILURE);\ }\ } while(0) #define close_file(file_ptr, file_name)\ do {\ if (fclose((file_ptr)) != 0) {\ fprintf(stderr, "%s:%d: error while closing file %s: %s\n", __FILE__, __LINE__, (file_name), strerror(errno));\ exit(EXIT_FAILURE);\ }\ } while(0) /* *============================================================================================================== * Class that contains morphological operations which are introduced in the paper of B. Burgeth and A. Kleefeld *============================================================================================================== */ class LoewnerMorphology::Morph { public: /* * Constructor of the class Morph. It takes the name of the file where the image is stored, the name of the file where a * structuring element (a mask) is stored and dimension of the structuring elements as arguments. */ Morph(const char *imageFile, const char *maskFile, int maskDim, int numberOfThreads = 8); /* * Destructor of the class Morph. */ ~Morph(); // MORPHOLOGICAL OPERATION /* * Performs morphological opration dilation on the input image. */ void dilation(int iter = 1); /* * Performs morphological operation erosion on the input image. */ void erosion(int iter = 1); /* * Performs morphological operation closing on the input image. 
*/ void closing(int iter = 1); /* * Performs morphological operation opening on the input image. */ void opening(int iter = 1); /* * Performs morphological operation black top hat on the input image. */ void blackTopHat(int iter = 1); /* * Performs morphological operation white top hat on the input image. */ void whiteTopHat(int iter = 1); /* * Performs morphological operation self-dual top hat on the input image. */ void selfDualTopHat(int iter = 1); /* * Performs morphological operation beucher gradient on the input image. */ void beucherGradient(int iter = 1); /* * Performs morphological operation internal gradient on the input image. */ void externalGradient(int iter = 1); /* * Performs morphological operation internal gradient on the input image. */ void internalGradient(int iter = 1); /* * Performs morphological operation morphological laplacian on the input image. */ void laplacian(int iter = 1); /* * Performs morphological operation shock filter on the input image. */ void shockFilter(int iter = 1); /* * Displays the original image. */ void displayOriginalImage(); /* * Displays the result of the morphological operation if the operation was called. */ void displayResultImage(); /* * Returns the result image as an array of floats. It allocates memory equal to the size of the image times spectrum. */ float *returnResult(); /* * Saves the result image to the file which name is provided as a filename argument. 
*/ void saveResult(const char *fileName); private: using Circle = LoewnerMorphology::MorphCircle; CImgFloatWrapper *inputImage; // input image CImgFloatWrapper *outputImage; // output image - after morphological operation int *mask; // mask array int padding; // mask padding LoewnerMorphology::MorphColorMatrix *matrices; // input image converted to array of MorphColorMatrix objects LoewnerMorphology::MorphColorMatrix *result; // result image converted to array of MorphColorMatrix objects int width; // width of the image int height; // height of the image int spectrum; // spectrum of the image int size; // size of the image // HANDLERS /* * Method that performes modified commutative Einstein subtraction of two images that are given on memory lications image1 and * image2. Both images have the size width * hight, with respective leading dimensions, lda1 and lda2. Precisely, the operation * image3 = image1 - image2 is perfomed. The result is stored on memory location image3, with leading dimension lda3. */ template<typename T> static void morph_einstein_launcher(T *image1, T *image2, T *imageResult, int width, int height, int lda1, int lda2, int lda3); /* * The method responsible for calculating morphological operations on a (2 * padding + 1)-dimensional squared matrix stored on * memory location start. Argument pWidth is the appropriate lda matrix lda. The matrix contains MorphCircle objects as elements. * The calculation is performed using the approach presented in the paper, based on Loewner order. The method returns the result * of the wanted morphological operation in form of MorphColorMatrix. 
* * Argument type determines a morphological operation: * 1) false -> DILATION * 2) true -> EROSION */ static MorphColorMatrix morph_basic_operation(Circle *start, int pWidth, int *mask, int padding, bool type); /* * The method responsible for invoking calculations needed for performing a basic morphological operation on given image vector * which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is * expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects * of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation, * which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a * mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height. * Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the * smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> DILATION * 2) true -> EROSION */ template<typename T> static void morph_basic_handler(Circle *inPrepared, T* in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for perfoming a wanted basic morphological operation on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). 
For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, a constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> DILATION * 2) true -> EROSION */ template<typename T> static void morph_basic_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for invoking calculations needed for performing a morphological operation on given image vector * which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is * expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects * of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation, * which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a * mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height. * Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the * smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist. 
* * Argument type determines a morphological operation: * 1) false -> CLOSING * 2) true -> OPENING */ template<typename T> static void morph_second_order_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for perfoming a wanted basic morphological operation on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> CLOSING * 2) true -> OPENING */ template<typename T> static void morph_second_order_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for invoking calculations needed for performing a morphological operation on given image vector * which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is * expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects * of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation, * which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). 
For example, if a * mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height. * Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the * smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> BLACK TOP HAT * 2) true -> WHITE TOP HAT */ template<typename T> static void morph_hats_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for perfoming a wanted morphological operation on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> BLACK TOP HAT * 2) true -> WHITE TOP HAT */ template<typename T> static void morph_hats_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for perfoming a morphological operation Beucher gradient on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. 
The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. */ template<typename T> static void morph_beucher_launcher(T *in, T *out, int width, int height, int padding, int *mask); /* * The method responsible for perfoming a morphological operation self-dual top hat on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. 
*/ template<typename T> static void morph_sdth_launcher(T *in, T *out, int width, int height, int padding, int *mask); /* * The method responsible for invoking calculations needed for performing a morphological operation on given image vector * which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is * expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects * of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation, * which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a * mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height. * Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the * smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> EXTERNAL GRADIENT * 2) true -> INTERNAL GRADIENT */ template<typename T> static void morph_gradients_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for perfoming a wanted morphological operation on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. 
Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. * * Argument type determines a morphological operation: * 1) false -> EXTERNAL GRADIENT * 2) true -> EXTERNAL GRADIENT */ template<typename T> static void morph_gradients_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type); /* * The method responsible for perfoming a morphological operation laplacian on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. */ template<typename T> static void morph_laplacian_launcher(T *in, T *out, int width, int height, int padding, int *mask); /* * The method responsible for perfoming a morphological operation shockfilter on given image vector. The input vector in is * expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size * width * height. 
Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions * 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in * the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem. * Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist. */ template<typename T> static void morph_shock_launcher(T *in, T *out, int width, int height, int padding, int *mask); /* * A method responsible for calculating morphological operation shokfilter. Arguments preparedDilation and preparedErosion * represent memory locations where two (2 * padding + width) * (2 * padding + height) matrices representing the original image * that has already been prepared for performing dilation and erosion operations, respectively. Both matrices contain MorphCircle * objects as elements. Also, a morphological laplacian of the original image has been stored on memory location laplacian, as a * width * height matrix of objects T. The morphological shockfilter is performend as follows: if trace(laplacian[pixel]) < 0, a * dilation of the selected pixel is performed, else, a erosion of the selected pixel is performed. The result is stored on memory * location out. */ template<typename T> static void morph_shock_operation(Circle *prepareDilation, Circle *prepareErosion, T *laplacian, T *out, int width, int height, int padding, int *mask); /* * A basic handler method for invoking launcher methods for performing all morphological operations which are introduced in the * paper of B. Burgeth and A. Kleefeld. Memory location in must contain the original image matrix with elements of type T. 
The * result of the selected morphological operation will be stored on memory location out. This memory location should be * preallocated to the size of width * height. Argument padding is a padding of the given structural element (maks). For example, * if the structuring element has dimensions 5x5, the padding is 2. * Argument iters defines number of iterations. * * The morphological operation is determined by morphType argument: * 0) DILATION * 1) EROSION * 2) CLOSING * 3) OPENING * 4) BLACK TOP HAT * 5) WHITE TOP HAT * 6) SELF-DUAL TOP HAT * 7) BEUCHER GRADIENT * 8) EXTERNAL GRADIENT * 9) INTERNAL GRADIENT * 10) MORPHOLOGICAL LAPLACIAN * 11) SHOCK FILTER */ template<typename T> static void morph_handle(T *in, T *out, int width, int height, int padding, int *mask, int morphType, int iters = 0); // HELPER METHODS /* * Helper method that creates the output image (CImgFloatWrapper object) after performing the morphological operation. */ void createOutputImage(); /* * Helper method for filling the array in with the given size with the given element alpha. */ template<typename T> static void fill(T *in, int size, T alpha); /* * Helper method that prepares the image vector for morphological operations. The image vector is stored on memory location in. * Its length should be width * height. The result is stored on the memory location out. Memory allocation should be done before * calling this method. Out should be allocated to the size of (width + (2 * padding)) * (height + (2 * padding)) * sizeof(T) * because a vector used in morphological operations should have an appropriate padding. * * Argument type determines a type of morphological operation that the vector needs to be prepared for: * 1) false -> DILATION * 2) true -> EROSION */ template<typename T> static void prepare_vector(T *in, Circle *out, int width, int height, int padding, bool type); /* * Helper method for copying one array to another. 
*/ template<typename T> static void copy(T *in, T *out, int size); /* * Reading a structuring element (a mask) from a file specified by the given file name. Also, a mask dimension needs to be * provided. The Mask is expected to be a maskDim * maskDim matrix containing only 0s and 1s. */ static void readMaskFromFile(int *maskPointer, int maskDim, const char *fileName); // DEBUGGING /* * Helper method for printing the given matrix of MorphCircle objects to the standard output. * Used for debbugging. */ static void print_vector(Circle *in, int width, int height, int lda); /* * Helper method for printing the given matrix of MorphColorMatrix objects to the standard output. * Used for debbugging. */ static void print_vector(LoewnerMorphology::MorphColorMatrix *in, int width, int height, int lda); /* * Helper method for printing the given matrix of floats to the standard output. * Used for debbugging. */ static void print_vector(float *in, int width, int height, int lda); }; template<typename T> void LoewnerMorphology::Morph::fill(T *in, int size, T alpha) { #pragma omp parallel for for (int i = 0; i < size; i++) { in[i] = alpha; } } template<typename T> void LoewnerMorphology::Morph::copy(T *in, T *out, int size) { #pragma omp parallel for for (int i = 0; i < size; i++) { out[i] = in[i]; } } template<typename T> void LoewnerMorphology::Morph::prepare_vector(T *in, Circle *out, int width, int height, int padding, bool type) { Circle element = (type) ? Circle(T::max()).prepareMin() : Circle(T::min()).prepareMax(); fill<Circle>(out, (width + 2 * padding) * (height + 2 * padding), element); int pWidth = width + 2 * padding; #pragma omp parallel for for (int i = 0; i < height; i++) { for(int j = 0; j < width; j++) { out[(i + padding) * pWidth + (j + padding)] = (type) ? 
Circle(in[i * width + j]).prepareMin() : Circle(in[i * width + j]).prepareMax(); } } } template<typename T> void LoewnerMorphology::Morph::morph_basic_handler(Circle *inPrepared, T* in, T* out, int width, int height, int padding, int *mask, bool type) { int pWidth = width + 2 * padding; prepare_vector<T>(in, inPrepared, width, height, padding, type); #pragma omp parallel for for (int i = 0; i < height; i++) { for(int j = 0; j < width; j++) { Circle *current = inPrepared + i * pWidth + j; out[i * width + j] = morph_basic_operation(current, pWidth, mask, padding, type); } } } template<typename T> void LoewnerMorphology::Morph::morph_basic_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; morph_basic_handler(inPrepared, in, out, width, height, padding, mask, type); delete inPrepared; } template<typename T> void LoewnerMorphology::Morph::morph_second_order_handler(Circle *inPrepared, T* in, T* out, int width, int height, int padding, int *mask, bool type) { morph_basic_handler(inPrepared, in, out, width, height, padding, mask, type); morph_basic_handler(inPrepared, out, out, width, height, padding, mask, !type); } template<typename T> void LoewnerMorphology::Morph::morph_second_order_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; morph_second_order_handler(inPrepared, in, out, width, height, padding, mask, type); delete inPrepared; } template<typename T> void LoewnerMorphology::Morph::morph_einstein_launcher(T *image1, T *image2, T *imageResult, int width, int height, int lda1, int lda2, int lda3) { #pragma omp parallel for for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { LoewnerMorphology::MorphColorMatrix m1 = Circle(image1[i * lda1 + j]).toMorphColorMatrixSphere(); LoewnerMorphology::MorphColorMatrix m2 = 
Circle(image2[i * lda2 + j]).toMorphColorMatrixSphere().negate(); imageResult[i * lda3 + j] = Circle(EinsteinOperations::einsteinAdditionMod(m1, m2)).toMorphColorMatrixCone2Epsilon(); } } } template<typename T> void LoewnerMorphology::Morph::morph_hats_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type) { morph_second_order_handler(inPrepared, in, out, width, height, padding, mask, type); if (type) { morph_einstein_launcher(in, out, out, width, height, width, width, width); } else { morph_einstein_launcher(out, in, out, width, height, width, width, width); } } template<typename T> void LoewnerMorphology::Morph::morph_hats_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; morph_hats_handler(inPrepared, in, out, width, height, padding, mask, type); delete inPrepared; } template<typename T> void LoewnerMorphology::Morph::morph_beucher_launcher(T *in, T *out, int width, int height, int padding, int *mask) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; T *temp = new T[width * height]; morph_basic_handler(inPrepared, in, out, width, height, padding, mask, false); morph_basic_handler(inPrepared, in, temp, width, height, padding, mask, true); morph_einstein_launcher(out, temp, out, width, height, width, width, width); delete inPrepared; delete temp; } template<typename T> void LoewnerMorphology::Morph::morph_sdth_launcher(T *in, T *out, int width, int height, int padding, int *mask) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; T *temp = new T[width * height]; morph_second_order_handler(inPrepared, in, out, width, height, padding, mask, false); morph_second_order_handler(inPrepared, in, temp, width, height, padding, mask, true); morph_einstein_launcher(out, temp, out, width, height, width, width, width); delete inPrepared; 
delete temp; } template<typename T> void LoewnerMorphology::Morph::morph_gradients_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type) { morph_basic_handler(inPrepared, in, out, width, height, padding, mask, type); if (type) { morph_einstein_launcher(in, out, out, width, height, width, width, width); } else { morph_einstein_launcher(out, in, out, width, height, width, width, width); } } template<typename T> void LoewnerMorphology::Morph::morph_gradients_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; morph_gradients_handler(inPrepared, in, out, width, height, padding, mask, type); delete inPrepared; } template<typename T> void LoewnerMorphology::Morph::morph_laplacian_launcher(T *in, T *out, int width, int height, int padding, int *mask) { Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; T *temp = new T[width * height]; morph_gradients_handler(inPrepared, in, out, width, height, padding, mask, false); morph_gradients_handler(inPrepared, in, temp, width, height, padding, mask, true); morph_einstein_launcher(out, temp, out, width, height, width, width, width); delete inPrepared; delete temp; } template<typename T> void LoewnerMorphology::Morph::morph_shock_launcher(T *in, T *out, int width, int height, int padding, int *mask) { T *laplacian = new T[width * height]; Circle *dilationPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; Circle *erosionPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))]; prepare_vector<T>(in, dilationPrepared, width, height, padding, false); prepare_vector<T>(in, erosionPrepared, width, height, padding, true); morph_laplacian_launcher(in, laplacian, width, height, padding, mask); morph_shock_operation(dilationPrepared, erosionPrepared, laplacian, out, width, height, padding, mask); delete 
dilationPrepared; delete erosionPrepared; delete laplacian; } template<typename T> void LoewnerMorphology::Morph::morph_shock_operation(Circle *dilationPrepared, Circle *erosionPrepared, T *laplacian, T *out, int width, int height, int padding, int *mask) { int pWidth = width + 2 * padding; #pragma omp parallel for for (int i = 0; i < height; i++) { for(int j = 0; j < width; j++) { int currentIdx = i * pWidth + j; if (laplacian[i * width + j].trace() <= 0) { out[i * width + j] = morph_basic_operation(dilationPrepared + currentIdx, pWidth, mask, padding, false); } else { out[i * width + j] = morph_basic_operation(erosionPrepared + currentIdx, pWidth, mask, padding, true); } } } } template<typename T> void LoewnerMorphology::Morph::morph_handle(T *in, T *out, int width, int height, int padding, int *mask, int morphType, int iters) { if (iters < 1) { printf("Operation cannot be executed. Number of iterations must be greater than 0, but %d provided.\n", iters); exit(EXIT_FAILURE); } T *temp = nullptr; if (iters > 1) { temp = new T[width * height]; } switch (morphType) { case 0: for (int i = 0; i < iters; i++) { if (i == 0) { morph_basic_launcher<T>(in, out, width, height, padding, mask, false); } else { copy(out, temp, width * height); morph_basic_launcher<T>(temp, out, width, height, padding, mask, false); } } break; case 1: for (int i = 0; i < iters; i++) { if (i == 0) { morph_basic_launcher<T>(in, out, width, height, padding, mask, true); } else { copy(out, temp, width * height); morph_basic_launcher<T>(temp, out, width, height, padding, mask, true); } } break; case 2: for (int i = 0; i < iters; i++) { if (i == 0) { morph_second_order_launcher<T>(in, out, width, height, padding, mask, false); } else { copy(out, temp, width * height); morph_second_order_launcher<T>(temp, out, width, height, padding, mask, false); } } break; case 3: for (int i = 0; i < iters; i++) { if (i == 0) { morph_second_order_launcher<T>(in, out, width, height, padding, mask, true); } else { 
copy(out, temp, width * height); morph_second_order_launcher<T>(temp, out, width, height, padding, mask, true); } } break; case 4: for (int i = 0; i < iters; i++) { if (i == 0) { morph_hats_launcher<T>(in, out, width, height, padding, mask, false); } else { copy(out, temp, width * height); morph_hats_launcher<T>(temp, out, width, height, padding, mask, false); } } break; case 5: for (int i = 0; i < iters; i++) { if (i == 0) { morph_hats_launcher<T>(in, out, width, height, padding, mask, true); } else { copy(out, temp, width * height); morph_hats_launcher<T>(temp, out, width, height, padding, mask, true); } } break; case 6: for (int i = 0; i < iters; i++) { if (i == 0) { morph_sdth_launcher<T>(in, out, width, height, padding, mask); } else { copy(out, temp, width * height); morph_sdth_launcher<T>(temp, out, width, height, padding, mask); } } break; case 7: for (int i = 0; i < iters; i++) { if (i == 0) { morph_beucher_launcher<T>(in, out, width, height, padding, mask); } else { copy(out, temp, width * height); morph_beucher_launcher<T>(temp, out, width, height, padding, mask); } } break; case 8: for (int i = 0; i < iters; i++) { if (i == 0) { morph_gradients_launcher<T>(in, out, width, height, padding, mask, false); } else { copy(out, temp, width * height); morph_gradients_launcher<T>(temp, out, width, height, padding, mask, false); } } break; case 9: for (int i = 0; i < iters; i++) { if (i == 0) { morph_gradients_launcher<T>(in, out, width, height, padding, mask, true); } else { copy(out, temp, width * height); morph_gradients_launcher<T>(temp, out, width, height, padding, mask, true); } } break; case 10: for (int i = 0; i < iters; i++) { if (i == 0) { morph_laplacian_launcher<T>(in, out, width, height, padding, mask); } else { copy(out, temp, width * height); morph_laplacian_launcher<T>(temp, out, width, height, padding, mask); } } break; case 11: for (int i = 0; i < iters; i++) { if (i == 0) { morph_shock_launcher<T>(in, out, width, height, padding, mask); } else 
{ copy(out, temp, width * height); morph_shock_launcher<T>(temp, out, width, height, padding, mask); } } break; } if (temp != nullptr) { delete temp; } } #endif
idaFoodWeb_kry_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDA: Food web problem, OpenMP, GMRES, * user-supplied preconditioner * * This example program uses SUNLinSol_SPGMR as the linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. 
In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDA using the SUNLinSol_SPGMR linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idaFoodWeb_kry_omp * To specify the number of threads at the command line, use * % ./idaFoodWeb_kry_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. 
* ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ida/ida.h> #include <sunlinsol/sunlinsol_spgmr.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_dense.h> #include <sundials/sundials_types.h> #include <sundials/sundials_math.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. 
 */

#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))

/* Type: UserData. Contains problem constants, etc. */

typedef struct {
  sunindextype Neq, ns, np, mx, my;                           /* problem sizes: Neq total, ns species, np prey, mx x my mesh */
  realtype dx, dy, **acoef;                                   /* mesh spacings and interaction matrix a(i,j) */
  realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; /* diffusion coefs d(i)/dx^2, d(i)/dy^2 and b(i) */
  realtype **PP[MX][MY];                                      /* block-diagonal preconditioner: one ns x ns block per grid point */
  sunindextype *pivot[MX][MY];                                /* LU pivot arrays, one per preconditioner block */
  N_Vector rates;                                             /* scratch storage for reaction rates at all grid points */
  N_Vector ewt;                                               /* error-weight vector fetched from IDA in Precond */
  void *ida_mem;                                              /* back-pointer to the IDA memory block */
  int nthreads;                                               /* number of OpenMP threads to use in user routines */
} *UserData;

/* Prototypes for functions called by the IDA Solver. */

static int resweb(realtype time, N_Vector cc, N_Vector cp,
                  N_Vector resval, void *user_data);

static int Precond(realtype tt, N_Vector cc, N_Vector cp,
                   N_Vector rr, realtype cj, void *user_data);

static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype delta, void *user_data);

/* Prototypes for private Helper Functions. */

static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata);
static void PrintHeader(int maxl, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy,
                     realtype *ratesxy, UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);

/*
 *--------------------------------------------------------------------
 * MAIN PROGRAM
 *--------------------------------------------------------------------
 */

int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, jx, jy, retval;
  int maxl;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;

  ida_mem = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();     /* overwrite with OMP_NUM_THREADS */
#endif
  if (argc > 1)          /* overwrite with command line value, if supplied */
    num_threads = strtol(argv[1], NULL, 0);

  /* Allocate and initialize user data block webdata. */

  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
  webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->ewt = N_VNew_OpenMP(NEQ, num_threads);
  /* One dense ns x ns preconditioner block and pivot array per grid point. */
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      (webdata->pivot)[jx][jy] = newIndexArray(NUM_SPECIES);
      (webdata->PP)[jx][jy] = newDenseMat(NUM_SPECIES, NUM_SPECIES);
    }
  }
  webdata->nthreads = num_threads;

  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */

  cc = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);

  cp = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);

  id = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);

  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */

  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */

  ida_mem = IDACreate();
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);

  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);

  /* id marks each component as differential (1) or algebraic (0),
     needed below by IDACalcIC with IDA_YA_YDP_INIT. */
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);

  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);

  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);

  /* Stored so Precond can query IDA for error weights and step size. */
  webdata->ida_mem = ida_mem;

  /* Create SUNLinSol_SPGMR linear solver, attach to IDA,
     and set preconditioning routines. */

  maxl = 16;                               /* max dimension of the Krylov subspace */
  LS = SUNLinSol_SPGMR(cc, PREC_LEFT, maxl);  /* IDA only allows left preconditioning */
  if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1);

  retval = IDASetLinearSolver(ida_mem, LS, NULL);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);

  retval = IDASetPreconditioner(ida_mem, Precond, PSolve);
  if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */

  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */

  PrintHeader(maxl, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */

  for (iout = 1; iout <= NOUT; iout++) {

    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);

    PrintOutput(ida_mem, cc, tret);

    /* Output times: decades up to t = 0.1, then fixed increments of TADD. */
    if (iout < 3) tout *= TMULT; else tout += TADD;

  }

  /* Print final statistics and free memory. */

  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */

  IDAFree(&ida_mem);
  SUNLinSolFree(LS);

  N_VDestroy_OpenMP(cc);
  N_VDestroy_OpenMP(cp);
  N_VDestroy_OpenMP(id);

  destroyMat(webdata->acoef);
  N_VDestroy_OpenMP(webdata->rates);
  N_VDestroy_OpenMP(webdata->ewt);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy ++) {
      destroyArray((webdata->pivot)[jx][jy]);
      destroyMat((webdata->PP)[jx][jy]);
    }
  }
  free(webdata);

  return(0);
}

/* Define lines for readability in later routines */

#define acoef  (webdata->acoef)
#define bcoef  (webdata->bcoef)
#define cox    (webdata->cox)
#define coy    (webdata->coy)

/*
 *--------------------------------------------------------------------
 * FUNCTIONS CALLED BY IDA
 *--------------------------------------------------------------------
 */

/*
 * resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. */ #pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc+is] = cpv[loc+is] - resv[loc+is]; else resv[loc+is] = -resv[loc+is]; } } } return(0); } static int Precond(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr, realtype cj, void *user_data) { int retval; realtype uround, xx, yy, del_x, del_y; realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp; realtype inc, fac, sqru, perturb_rates[NUM_SPECIES]; int is, js, jx, jy, ret; void *ida_mem; N_Vector ewt; realtype hh; UserData webdata; webdata = (UserData) user_data; del_x = webdata->dx; del_y = webdata->dy; uround = UNIT_ROUNDOFF; sqru = SUNRsqrt(uround); ida_mem = webdata->ida_mem; ewt = webdata->ewt; retval = IDAGetErrWeights(ida_mem, ewt); if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1); retval = IDAGetCurrentStep(ida_mem, &hh); if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1); for (jy = 0; jy < MY; jy++) { yy = jy * del_y; for (jx = 0; jx < MX; jx++) { xx = jx * del_x; Pxy = (webdata->PP)[jx][jy]; cxy = IJ_Vptr(cc, jx, jy); cpxy = IJ_Vptr(cp, jx, jy); ewtxy = IJ_Vptr(ewt, jx, jy); ratesxy = IJ_Vptr((webdata->rates), jx, jy); for (js 
= 0; js < NUM_SPECIES; js++) { inc = sqru*(SUNMAX(SUNRabs(cxy[js]), SUNMAX(hh*SUNRabs(cpxy[js]), ONE/ewtxy[js]))); cctmp = cxy[js]; cxy[js] += inc; fac = -ONE/inc; WebRates(xx, yy, cxy, perturb_rates, webdata); Pxycol = Pxy[js]; for (is = 0; is < NUM_SPECIES; is++) Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac; if (js < 1) Pxycol[js] += cj; cxy[js] = cctmp; } ret = denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES, (webdata->pivot)[jx][jy]); if (ret != 0) return(1); } } return(0); } static int PSolve(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr, N_Vector rvec, N_Vector zvec, realtype cj, realtype dalta, void *user_data) { realtype **Pxy, *zxy; sunindextype *pivot; int jx, jy; UserData webdata; webdata = (UserData) user_data; N_VScale(ONE, rvec, zvec); #pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads) for (jx = 0; jx < MX; jx++) { for (jy = 0; jy <MY; jy++) { zxy = IJ_Vptr(zvec, jx, jy); Pxy = (webdata->PP)[jx][jy]; pivot = (webdata->pivot)[jx][jy]; denseGETRS(Pxy, NUM_SPECIES, pivot, zxy); } } return(0); } /* *-------------------------------------------------------------------- * PRIVATE FUNCTIONS *-------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { int i, j, np; realtype *a1,*a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX/(MX-1); webdata->dy = AY/(MY-1); webdata->Neq= NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i+np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i+np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. 
 */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;     /* prey-predator block:    a(i,j) = -GG  */
      *a2++ = EE;      /* predator-prey block:    a(i,j) =  EE  */
      *a3++ = ZERO;    /* prey-prey off-diagonal: a(i,j) =  0   */
      *a4++ = ZERO;    /* predator-predator off-diagonal: 0     */
    }

    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA;
    acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB;
    bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2;
    cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i+np] = DPRED/dy2;
  }

}

/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used to set the initial values of the prey cc
 * values, and a constant (1.0e5) is loaded as the initial guess for the
 * predator cc values. The id values are set to 1 for the prey and 0 for
 * the predators. The prey cp values are set according to the given
 * system, and the predator cp values are set to zero.
 */

static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;

  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;

  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* Polynomial bump: zero on the boundary, largest at the center. */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;

      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          /* Prey: polynomial profile, differential component (id = 1). */
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          /* Predator: flat guess (corrected later by IDACalcIC),
             algebraic component (id = 0). */
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }

  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);

  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }

}

/*
 * Print first lines of output (problem description)
 */

static void PrintHeader(int maxl, realtype rtol, realtype atol)
{
  printf("\nidaFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDA \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf(" Mesh dimensions: %d x %d", MX, MY);
  printf(" System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNLinSol_SPGMR, maxl = %d\n",maxl);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf(" t bottom-left top-right");
  printf(" | nst k h\n");
  printf("-----------------------------------------------------------\n\n");
}

/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */

static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;

  /* Query IDA for the last order used, total steps, and last step size. */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);

  /* Pointers to the species values at the two corner grid points. */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);

  /* Format strings differ only by the realtype precision in use. */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif

  printf("\n");
}

/*
 * PrintFinalStats: Print final run data contained in iopt.
 */

static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, sli, netf, nps, npevals, nrevalsLS;
  int retval;

  /* Gather the solver counters accumulated during the run. */
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumLinIters(ida_mem, &sli);
  check_retval(&retval, "IDAGetNumLinIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumPrecSolves(ida_mem, &nps);
  check_retval(&retval, "IDAGetNumPrecSolves", 1);
  retval = IDAGetNumPrecEvals(ida_mem, &npevals);
  check_retval(&retval, "IDAGetNumPrecEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nrevalsLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps = %ld\n", nst);
  printf("Number of residual evaluations = %ld\n", nre);
  printf("Number of Preconditioner evaluations = %ld\n", npevals);
  printf("Number of linear iterations = %ld\n", sli);
  printf("Number of error test failures = %ld\n", netf);
  printf("Number of precond solve fun called = %ld\n", nps);
}

/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */

static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;

  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */

  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* At a boundary row, mirror the offset to enforce the homogeneous
       Neumann condition (normal derivative = 0). */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;

    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);

      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);

      /* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {

        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);

        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);

        /* Compute the crate values at (xx,yy). */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
          cox[is] * (dcxui - dcxli) + ratesxy[is];

      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */

}

/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At a given (x,y), evaluate the array of ns reaction terms R.
 */

static void WebRates(realtype xx, realtype yy, realtype *cxy,
                     realtype *ratesxy, UserData webdata)
{
  int is;
  realtype fac;

  /* ratesxy[i] = sum_j a(i,j)*c[j] */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]);

  /* Spatially varying factor in the b coefficients. */
  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  /* R_i = c_i * (b(i)*fac + sum_j a(i,j)*c_j) */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] );

}

/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 */

static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype i;
  realtype *xx1, *xx2, temp = ZERO;

  xx1 = x1;
  xx2 = x2;
  for (i = 0; i < size; i++)
    temp += (*xx1++) * (*xx2++);
  return(temp);
}

/*
 * Check function return value...
/*
 * opt == 0 means SUNDIALS function allocates memory so check if
 *          returned NULL pointer
 * opt == 1 means SUNDIALS function returns an integer value so check if
 *          retval < 0
 * opt == 2 means function allocates memory so check if returned
 *          NULL pointer
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *errvalue;

  if (opt == 0) {
    /* Allocation-style SUNDIALS call: NULL signals failure. */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    return(0);
  }

  if (opt == 1) {
    /* Flag-returning SUNDIALS call: a negative flag signals failure. */
    errvalue = (int *) returnvalue;
    if (*errvalue < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *errvalue);
      return(1);
    }
    return(0);
  }

  /* Generic allocation check: NULL signals failure. */
  if (opt == 2 && returnvalue == NULL) {
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  return(0);
}
DRB098-simd2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>

/*
 * Two-dimensional array computation with a vectorization directive.
 * collapse(2) makes simd associate with 2 loops.
 * Loop iteration variables should be predetermined as lastprivate,
 * so this kernel has no data race (DataRaceBench label: orig-no).
 */
int main()
{
  int len=100;                                   /* edge length of the square arrays */
  double a[len][len], b[len][len], c[len][len];  /* VLAs: two inputs and one output */
  int i,j;

  /* Initialize inputs (and c, which is fully overwritten below). */
  for (i=0;i<len;i++)
    for (j=0;j<len;j++)
    {
      a[i][j]=((double)i)/2.0;
      b[i][j]=((double)i)/3.0;
      c[i][j]=((double)i)/7.0;
    }

  /* Element-wise product, vectorized across both collapsed loops. */
#pragma omp simd collapse(2)
  for (i=0;i<len;i++)
    for (j=0;j<len;j++)
      c[i][j]=a[i][j]*b[i][j];

  printf ("c[50][50]=%f\n",c[50][50]);
  return 0;
}
main.c
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #define I(N, i, j) ((N) * (i) + (j)) #ifdef DEBUG # define DBG(FORMAT, ARGS...) fprintf(stderr, FORMAT, ARGS) #else # define DBG(FORMAT, ARGS...) \ {} #endif void to_image(char* filename, size_t N, float* img) { float max = img[0]; float min = img[0]; for (size_t i = 1; i < N * N; ++i) { max = (max < img[i]) ? img[i] : max; min = (min > img[i]) ? img[i] : min; } FILE* fp = fopen(filename, "w"); fprintf(fp, "P3\n%zu %zu\n255\n", N, N); for (int h = N - 1; h >= 0; --h) { for (size_t w = 0; w < N; ++w) { float pixel = (img[I(N, h, w)] > 0) ? img[I(N, h, w)] : 0; float H = 60 + (240 - 60) * (1 - (pixel + min) / (max + min)); float X = 1 * (1 - fabs(fmod(H / 60.0, 2) - 1)); float r, g, b; if (H >= 0 && H < 60) { r = 1, g = X, b = 0; } else if (H >= 60 && H < 120) { r = X, g = 1, b = 0; } else if (H >= 120 && H < 180) { r = 0, g = 1, b = X; } else if (H >= 180 && H < 240) { r = 0, g = X, b = 1; } else if (H >= 240 && H < 300) { r = X, g = 0, b = 1; } else { r = 1, g = 0, b = X; } fprintf(fp, "%d %d %d\n", (int) (r * 255), (int) (g * 255), (int) (b * 255)); } } fclose(fp); } void populate_matrix(size_t N, float* w) { // 0th line for (size_t j = 0; j < N; ++j) { w[I(N, 0, j)] = 0; } // middle lines for (size_t i = 1; i < N - 1; ++i) { w[I(N, i, 0)] = 100; for (size_t j = 1; j < N - 1; ++j) { w[I(N, i, j)] = 50; } w[I(N, i, N - 1)] = 100; } // last line for (size_t j = 0; j < N; ++j) { w[I(N, N - 1, j)] = 100; } } void parallel_populate_matrix(size_t N, float* w) { // 0th line for (size_t j = 0; j < N; ++j) { w[I(N, 0, j)] = 0; } // middle lines #pragma omp parallel for for (size_t i = 1; i < N - 1; ++i) { w[I(N, i, 0)] = 100; for (size_t j = 1; j < N - 1; ++j) { w[I(N, i, j)] = 50; } w[I(N, i, N - 1)] = 100; } // last line for (size_t j = 0; j < N; ++j) { w[I(N, N - 1, j)] = 100; } } float* poisson_gs(size_t N, float TOL, size_t* ITER) { float* w = 
malloc(sizeof(float) * N * N); populate_matrix(N, w); float* u = malloc(sizeof(float) * N * N); float DIFF = TOL + 1; size_t iter = 0; for (; DIFF > TOL; ++iter) { // store last iteration in u memcpy(u, w, sizeof(float) * N * N); for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1; j < N - 1; ++j) { w[I(N, i, j)] = (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } DIFF = fabs(w[0] - u[0]); for (size_t i = 1; i < N * N; ++i) { float n = fabs(w[i] - u[i]); DIFF = (DIFF < n) ? n : DIFF; } } free(u); *ITER = iter; return w; } float* poisson_gs_parallel(size_t N, float TOL, size_t* ITER) { float* w = malloc(sizeof(float) * N * N); populate_matrix(N, w); float* u = malloc(sizeof(float) * N * N); float DIFF = TOL + 1; size_t iter = 0; for (; DIFF > TOL; ++iter) { // store last iteration in u memcpy(u, w, sizeof(float) * N * N); for (size_t diag = 1; diag < (N - 1) * 2; ++diag) { #pragma omp parallel for for (size_t i = 1; i <= diag; ++i) { int j = diag - i + 1; if (j < N - 1 && i < N - 1) { w[I(N, i, j)] = (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } } DIFF = fabs(w[0] - u[0]); for (size_t i = 1; i < N * N; ++i) { float n = fabs(w[i] - u[i]); DIFF = (DIFF < n) ? 
n : DIFF; } } free(u); *ITER = iter; return w; } float* poisson_gsrb(size_t N, float TOL, size_t* ITER) { float* w = malloc(sizeof(float) * N * N); populate_matrix(N, w); float* u = malloc(sizeof(float) * N * N); float DIFF = TOL + 1; size_t iter = 0; for (; DIFF > TOL; ++iter) { // store last iteration in u memcpy(u, w, sizeof(float) * N * N); // update red points for (size_t i = 1; i < N - 2; ++i) { for (size_t j = 1 + (i % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } // update black points for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + ((i + 1) % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } DIFF = fabs(w[0] - u[0]); for (size_t i = 1; i < N * N; ++i) { float n = fabs(w[i] - u[i]); DIFF = (DIFF < n) ? n : DIFF; } } free(u); *ITER = iter; return w; } float* poisson_gsrb_parallel(size_t N, float TOL, size_t* ITER) { float* w = malloc(sizeof(float) * N * N); populate_matrix(N, w); float* u = malloc(sizeof(float) * N * N); float DIFF = TOL + 1; size_t iter = 0; for (; DIFF > TOL; ++iter) { // store last iteration in u memcpy(u, w, sizeof(float) * N * N); #pragma omp parallel { // update red points #pragma omp for for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + (i % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } // update black points #pragma omp for for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + ((i + 1) % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } } DIFF = fabs(w[0] - u[0]); for (size_t i = 1; i < N * N; ++i) { float n = fabs(w[i] - u[i]); DIFF = (DIFF < n) ? 
n : DIFF; } } free(u); *ITER = iter; return w; } float* poisson_sorrb(size_t N, float TOL, size_t* ITER) { float* w = malloc(sizeof(float) * N * N); populate_matrix(N, w); float* u = malloc(sizeof(float) * N * N); float p = 2.0f / (1.0f + sinf(M_PI / (N - 1))); float DIFF = TOL + 1; size_t iter = 0; for (; DIFF > TOL; ++iter) { // store last iteration in u memcpy(u, w, sizeof(float) * N * N); // update red points for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + (i % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (1 - p) * w[I(N, i, j)] + p * (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } // update black points for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + ((i + 1) % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (1 - p) * w[I(N, i, j)] + p * (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } DIFF = fabs(w[0] - u[0]); for (size_t i = 1; i < N * N; ++i) { float n = fabs(w[i] - u[i]); DIFF = (DIFF < n) ? 
n : DIFF; } } free(u); *ITER = iter; return w; } float* poisson_sorrb_parallel(size_t N, float TOL, size_t* ITER) { float* w = malloc(sizeof(float) * N * N); populate_matrix(N, w); float* u = malloc(sizeof(float) * N * N); float p = 2.0f / (1.0f + sinf(M_PI / (N - 1))); float DIFF = TOL + 1; size_t iter = 0; for (; DIFF > TOL; ++iter) { // store last iteration in u memcpy(u, w, sizeof(float) * N * N); #pragma omp parallel { // update red points #pragma omp for for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + (i % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (1 - p) * w[I(N, i, j)] + p * (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } // update black points #pragma omp for for (size_t i = 1; i < N - 1; ++i) { for (size_t j = 1 + ((i + 1) % 2); j < N - 1; j += 2) { w[I(N, i, j)] = (1 - p) * w[I(N, i, j)] + p * (w[I(N, i - 1, j)] + w[I(N, i, j - 1)] + w[I(N, i, j + 1)] + w[I(N, i + 1, j)]) / 4; } } } DIFF = fabs(w[0] - u[0]); for (size_t i = 1; i < N * N; ++i) { float n = fabs(w[i] - u[i]); DIFF = (DIFF < n) ? n : DIFF; } } free(u); *ITER = iter; return w; } void run_test( float* f(size_t N, float TOL, size_t* ITER), size_t N, float TOL, size_t* ITER, char* filename) { clock_t start, end; start = clock(); float* img = f(N, TOL, ITER); end = clock(); to_image(filename, N, img); printf("%s \t%zu iter\t%f sec\n", filename, *ITER, ((double) (end - start) / CLOCKS_PER_SEC)); free(img); } int main(int argc, char* argv[]) { size_t N = atoi(argv[1]); float TOL = 0.02; size_t ITER; run_test(poisson_gs, N, TOL, &ITER, "poisson_gs.ppm"); run_test(poisson_gs_parallel, N, TOL, &ITER, "poisson_gs_par.ppm"); puts(""); run_test(poisson_gsrb, N, TOL, &ITER, "poisson_gsrb.ppm"); run_test(poisson_gsrb_parallel, N, TOL, &ITER, "poisson_gsrb_par.ppm"); puts(""); run_test(poisson_sorrb, N, TOL, &ITER, "poisson_sorrb.ppm"); run_test(poisson_sorrb_parallel, N, TOL, &ITER, "poisson_sorrb_par.ppm"); }
SoaDistanceTableAB.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_H
#define QMCPLUSPLUS_DTDIMPL_AB_H

#include "Utilities/FairDivide.h"
#include "Message/OpenMP.h"

namespace qmcplusplus
{
/**@ingroup nnlist
 * @brief A derived class from DistanceTableData, specialized for AB using a transposed form.
 *
 * Storage layout (established by resize/evaluate below): distances_ and
 * displacements_ are indexed [target][source], i.e. one padded source-stripe
 * per target particle. temp_r_/temp_dr_ hold the proposed-move stripe for a
 * single target.
 */
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAB : public DTD_BConds<T, D, SC>, public DistanceTableData
{
  /// Build the AB table between a fixed `source` set and a movable `target` set.
  SoaDistanceTableAB(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice), DistanceTableData(source, target)
  {
    resize(source.getTotalNum(), target.getTotalNum());
  }

  /// (Re)allocate per-target stripes; source dimension is padded for SIMD alignment.
  void resize(int ns, int nt)
  {
    N_sources = ns;
    N_targets = nt;
    if (N_sources * N_targets == 0)
      return;

    // initialize memory containers and views
    const int Nsources_padded = getAlignedSize<T>(N_sources);
    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].resize(Nsources_padded);
      displacements_[i].resize(Nsources_padded);
    }

    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(Nsources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableAB() = delete;
  SoaDistanceTableAB(const SoaDistanceTableAB&) = delete;

  /** evaluate the full table.
   * Threads split the (aligned) source range; each thread fills its slice of
   * every target's stripe.
   */
  inline void evaluate(ParticleSet& P)
  {
#pragma omp parallel
    {
      int first, last;
      FairDivideAligned(N_sources, getAlignment<T>(), omp_get_num_threads(), omp_get_thread_num(), first, last);

      //be aware of the sign of Displacement
      for (int iat = 0; iat < N_targets; ++iat)
        DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                               distances_[iat].data(), displacements_[iat], first, last);
    }
  }

  /** evaluate the temporary pair relations for a proposed move of target `iat`
   * to `rnew`; results land in temp_r_/temp_dr_ without touching the table.
   * `prepare_old` is unused here.
   */
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old)
  {
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(),
                                           temp_dr_, 0, N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                             distances_[iat].data(), displacements_[iat], 0, N_sources);
  }

  /// update the stripe for iat-th (moved) target particle from the temporaries.
  /// Only N_sources entries are copied; the padded tail is left untouched.
  inline void update(IndexType iat, bool partial_update)
  {
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  /** Compact list of targets within `rcut` of source `iat`.
   * Note the transposed access distances_[jat][iat]: jat runs over targets.
   * Displacements are sign-flipped (source -> target convention).
   * @return number of neighbors written to jid/dist/displ
   */
  size_t get_neighbors(int iat, RealType rcut, int* restrict jid, RealType* restrict dist, PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      {
        //make the compact list
        jid[nn]   = jat;
        dist[nn]  = rij;
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  /** Closest source to target `iat`.
   * With newpos=true the proposed-position temporaries are scanned, otherwise
   * the stored stripe for `iat`. Returns the source index, or -1 (only
   * possible if N_sources == 0); r/dr are set only when a neighbor is found.
   */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index         = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  /// Distance-only variant of the neighbor scan above (same transposed access).
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      {
        //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }
};
} // namespace qmcplusplus
#endif
mm-omp-row.c
/**
 *
 * Matrix Multiplication - Shared-memory (OpenMP)
 *
 * CS3210
 *
 **/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>

int size;     /* matrix dimension (size x size) */
int threads;  /* requested/actual OpenMP thread count */

typedef struct
{
	float ** element;
} matrix;

/**
 * Nanosecond timestamp: CLOCK_REALTIME on LINUX builds, gettimeofday otherwise.
 **/
long long wall_clock_time()
{
#ifdef LINUX
	struct timespec tp;
	clock_gettime(CLOCK_REALTIME, &tp);
	return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000LL);
#else
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000LL);
#endif
}

/**
 * Allocates memory for a matrix of size SIZE.
 * Rows are individually malloc'd; elements within a row are contiguous.
 **/
void allocate_matrix(matrix* m)
{
	int i;

	// allocate array for all the rows
	m->element = (float**)malloc(sizeof(float*) * size);
	if (m->element == NULL) {
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}

	// allocate an array for each row of the matrix
	for (i = 0; i < size; i++) {
		m->element[i] = (float*)malloc(sizeof(float) * size);
		if (m->element[i] == NULL) {
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}
	}
}

/**
 * Free the memory allocated to a matrix.
 **/
void free_matrix(matrix* m)
{
	int i;
	for (i = 0; i < size; i++) {
		free(m->element[i]);
	}
	free(m->element);
}

/**
 * Initializes the elements of the matrix with
 * random values between 0 and 9
 **/
void init_matrix(matrix m)
{
	int i, j;

	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++) {
			m.element[i][j] = rand() % 10;
		}
}

/**
 * Initializes the elements of the matrix with
 * element 0.
 **/
void init_matrix_zero(matrix m)
{
	int i, j;

	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++) {
			m.element[i][j] = 0.0;
		}
}

/**
 * Multiplies matrix @a with matrix @b storing
 * the result in matrix @result
 *
 * The multiplication algorithm is the O(n^3) algorithm.
 * @result must be zero-initialized by the caller (accumulates with +=).
 *
 * NOTE(review): the inner product reads b.element[j][k], i.e. b is traversed
 * row-wise, which computes A * B^T unless b is stored transposed. This looks
 * intentional for cache-friendly access (file is the "row" variant) — confirm
 * against the baseline implementation before comparing results.
 */
void mm(matrix a, matrix b, matrix result)
{
	int i, j, k;

	// Parallelize the multiplication
	// Each thread works on one iteration of the outer-most loop;
	// a, b, result are shared, loop indices are private per thread.
	#pragma omp parallel for shared(a, b, result) private (i, j, k)
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++)
			for (k = 0; k < size; k++)
				result.element[i][j] += a.element[i][k] * b.element[j][k];
}

/**
 * Print the matrix row by row (debug helper).
 **/
void print_matrix(matrix m)
{
	int i, j;

	for (i = 0; i < size; i++) {
		printf("row %4d: ", i);
		for (j = 0; j < size; j++)
			printf("%6.2f  ", m.element[i][j]);
		printf("\n");
	}
}

/**
 * Allocate, initialize, multiply and time the multiplication.
 **/
void work()
{
	matrix a, b, result;
	long long before, after;

	// Allocate memory for matrices
	allocate_matrix(&a);
	allocate_matrix(&b);
	allocate_matrix(&result);

	// Initialize matrix elements
	init_matrix(a);
	init_matrix(b);
	// BUG FIX: mm() accumulates with +=, so result must start at zero;
	// previously it held uninitialized malloc'd memory.
	init_matrix_zero(result);

	// Perform parallel matrix multiplication
	before = wall_clock_time();
	mm(a, b, result);
	after = wall_clock_time();
	fprintf(stderr, "Matrix multiplication took %1.2f seconds\n",
	        ((float)(after - before)) / 1000000000);

	// Print the result matrix
	// print_matrix(result);

	// BUG FIX: release the matrices (free_matrix existed but was never called).
	free_matrix(&a);
	free_matrix(&b);
	free_matrix(&result);
}

int main(int argc, char ** argv)
{
	srand(0);

	printf("Usage: %s <size> <threads>\n", argv[0]);

	if (argc >= 2)
		size = atoi(argv[1]);
	else
		size = 1024;
	if (argc >= 3)
		threads = atoi(argv[2]);
	else
		threads = -1;

	// Multiply the matrices
	if (threads != -1) {
		omp_set_num_threads(threads);
	}
	// Query the actual team size (every thread writes the same value).
	#pragma omp parallel
	{
		threads = omp_get_num_threads();
	}
	printf("Matrix multiplication of size %d using %d threads\n", size, threads);
	work();

	return 0;
}
bml_threshold_dense_typed.c
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#include "../bml_export.h"
#include "bml_export_dense.h"
#endif

#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_threshold.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_allocate_dense.h"
#include "bml_threshold_dense.h"
#include "bml_types_dense.h"
#include "../bml_logger.h"

#include <complex.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Threshold a matrix.
 *
 * \ingroup threshold_group
 *
 * \param A The matrix to be thresholded
 * \param threshold Threshold value
 * \return The thresholded A
 *
 * Builds a new zero matrix B and copies over only the entries of A whose
 * magnitude passes is_above_threshold(); A is left untouched. Only the rows
 * owned by this rank (per A's domain decomposition) are processed.
 */
bml_matrix_dense_t *TYPED_FUNC(
    bml_threshold_new_dense) (
    bml_matrix_dense_t * A,
    double threshold)
{
#ifdef BML_USE_MAGMA
    /* MAGMA-backed matrices are unsupported here; LOG_ERROR reports the
     * condition (presumably fatal — confirm LOG_ERROR's abort semantics,
     * since execution would otherwise fall through to CPU pointer access). */
    LOG_ERROR
        ("bml_threshold_new_dense() not implemented for MAGMA matrices\n");
#endif
    int N = A->N;
    bml_matrix_dimension_t matrix_dimension = { A->N, A->N, A->N };
    bml_matrix_dense_t *B =
        TYPED_FUNC(bml_zero_matrix_dense) (matrix_dimension,
                                           A->distribution_mode);
    REAL_T *A_matrix = A->matrix;
    REAL_T *B_matrix = B->matrix;

    /* this rank's slice of rows, expressed as a flat element range */
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();

#pragma omp parallel for \
    shared(N, A_matrix, B_matrix) \
    shared(A_localRowMin, A_localRowMax, myRank)
    //for (int i = 0; i < N * N; i++)
    for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
         i++)
    {
        if (is_above_threshold(A_matrix[i], (REAL_T) threshold))
        {
            B_matrix[i] = A_matrix[i];
        }
    }
    return B;
}

/** Threshold a matrix in place.
 *
 * \ingroup threshold_group
 *
 * \param A The matrix to be thresholded
 * \param threshold Threshold value
 * \return The thresholded A
 *
 * Zeroes every locally-owned entry whose magnitude does NOT pass
 * is_above_threshold(). For MAGMA builds the matrix is first exported to a
 * host-side row-major buffer and the result copied back to the device.
 */
void TYPED_FUNC(
    bml_threshold_dense) (
    bml_matrix_dense_t * A_bml,
    double threshold)
{
    int N = A_bml->N;
#ifdef BML_USE_MAGMA
    /* host copy of the device matrix; thresholding happens on the host.
     * NOTE(review): the buffer returned by bml_export_to_dense() does not
     * appear to be freed after the setmatrix copy-back — verify ownership. */
    REAL_T *A_matrix = bml_export_to_dense(A_bml, dense_row_major);
#else
    REAL_T *A_matrix = A_bml->matrix;
#endif

    int *A_localRowMin = A_bml->domain->localRowMin;
    int *A_localRowMax = A_bml->domain->localRowMax;
    int myRank = bml_getMyRank();

#pragma omp parallel for \
    shared(N, A_matrix) \
    shared(A_localRowMin, A_localRowMax, myRank)
    //for (int i = 0; i < N * N; i++)
    for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
         i++)
    {
        if (!is_above_threshold(A_matrix[i], (REAL_T) threshold))
        {
            A_matrix[i] = (REAL_T) 0.0;
        }
    }
#ifdef BML_USE_MAGMA
    /* push the thresholded host buffer back to the device matrix */
    MAGMA(setmatrix) (N, N, (MAGMA_T *) A_matrix, N, A_bml->matrix,
                      A_bml->ld, A_bml->queue);
#endif
}
stat_ops_dm.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "stat_ops_dm.h" #include "utility.h" #include "constant.h" // calculate norm double dm_state_norm_squared(const CTYPE *state, ITYPE dim) { ITYPE index; double norm = 0; #ifdef _OPENMP #pragma omp parallel for reduction(+:norm) #endif for (index = 0; index < dim; ++index){ norm += creal(state[index*dim+index]); } return norm; } // calculate entropy of probability distribution of Z-basis measurements double dm_measurement_distribution_entropy(const CTYPE *state, ITYPE dim){ ITYPE index; double ent=0; const double eps = 1e-15; #ifdef _OPENMP #pragma omp parallel for reduction(+:ent) #endif for(index = 0; index < dim; ++index){ double prob = creal(state[index*dim + index]); if(prob > eps){ ent += -1.0*prob*log(prob); } } return ent; } // calculate probability with which we obtain 0 at target qubit double dm_M0_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){ const ITYPE loop_dim = dim/2; const ITYPE mask = 1ULL << target_qubit_index; ITYPE state_index; double sum =0.; #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) #endif for(state_index=0;state_index<loop_dim;++state_index){ ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index); sum += creal(state[basis_0*dim+basis_0]); } return sum; } // calculate probability with which we obtain 1 at target qubit double dm_M1_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){ const ITYPE loop_dim = dim/2; const ITYPE mask = 1ULL << target_qubit_index; ITYPE state_index; double sum =0.; #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) #endif for(state_index=0;state_index<loop_dim;++state_index){ ITYPE basis_1 = insert_zero_to_basis_index(state_index,mask,target_qubit_index) ^ mask; sum += creal(state[basis_1*dim + basis_1]); } return sum; } // calculate merginal probability with which we obtain the set of values measured_value_list at sorted_target_qubit_index_list // warning: 
sorted_target_qubit_index_list must be sorted. double dm_marginal_prob(const UINT* sorted_target_qubit_index_list, const UINT* measured_value_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim){ ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE state_index; double sum=0.; #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) #endif for(state_index = 0;state_index < loop_dim; ++state_index){ ITYPE basis = state_index; for(UINT cursor=0; cursor < target_qubit_index_count ; cursor++){ UINT insert_index = sorted_target_qubit_index_list[cursor]; ITYPE mask = 1ULL << insert_index; basis = insert_zero_to_basis_index(basis, mask , insert_index ); basis ^= mask * measured_value_list[cursor]; } sum += creal(state[basis*dim+basis]); } return sum; } void dm_state_add(const CTYPE *state_added, CTYPE *state, ITYPE dim) { ITYPE index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < dim*dim; ++index) { state[index] += state_added[index]; } } void dm_state_multiply(CTYPE coef, CTYPE *state, ITYPE dim) { ITYPE index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < dim*dim; ++index) { state[index] *= coef; } } double dm_expectation_value_multi_qubit_Pauli_operator_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim) { const ITYPE matrix_dim = 1ULL << target_qubit_index_count; CTYPE* matrix = (CTYPE*)malloc(sizeof(CTYPE)*matrix_dim*matrix_dim); for (ITYPE y = 0; y < matrix_dim; ++y) { for (ITYPE x = 0; x < matrix_dim; ++x) { CTYPE coef = 1.0; for (UINT i = 0; i < target_qubit_index_count; ++i) { ITYPE xi = (x >> i) % 2; ITYPE yi = (y >> i) % 2; coef *= PAULI_MATRIX[Pauli_operator_type_list[i]][yi * 2 + xi]; } matrix[y*matrix_dim + x] = coef; } } const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count); CTYPE sum = 0; for (ITYPE state_index = 0; state_index< dim; 
++state_index) { ITYPE small_dim_index = 0; ITYPE basis_0 = state_index; for (UINT i = 0; i < target_qubit_index_count; ++i) { UINT target_qubit_index = target_qubit_index_list[i]; if (state_index & (1ULL << target_qubit_index)) { small_dim_index += (1ULL << i); basis_0 ^= (1ULL << target_qubit_index); } } for (ITYPE i = 0; i < matrix_dim; ++i) { sum += matrix[small_dim_index*matrix_dim + i] * state[state_index*dim + (basis_0 ^ matrix_mask_list[i])]; } } free(matrix); free((ITYPE*)matrix_mask_list); return creal(sum); } void dm_state_tensor_product(const CTYPE* state_left, ITYPE dim_left, const CTYPE* state_right, ITYPE dim_right, CTYPE* state_dst) { ITYPE y_left, x_left, y_right, x_right; const ITYPE dim_new = dim_left * dim_right; for (y_left = 0; y_left < dim_left; ++y_left) { for (x_left = 0; x_left < dim_left; ++x_left) { CTYPE val_left = state_left[y_left * dim_left + x_left]; for (y_right = 0; y_right < dim_right; ++y_right) { for (x_right = 0; x_right < dim_right; ++x_right) { CTYPE val_right = state_right[y_right * dim_right + x_right]; ITYPE x_new = x_left * dim_left + x_right; ITYPE y_new = y_left * dim_left + y_right; state_dst[y_new * dim_new + x_new] = val_right * val_left; } } } } } void dm_state_permutate_qubit(const UINT* qubit_order, const CTYPE* state_src, CTYPE* state_dst, UINT qubit_count, ITYPE dim) { ITYPE y, x; for (y = 0; y < dim; ++y) { for (x = 0; x < dim; ++x) { ITYPE src_x = 0, src_y = 0; for (UINT qubit_index = 0; qubit_index < qubit_count; ++qubit_index) { if ((x >> qubit_index) % 2) { src_x += 1ULL << qubit_order[qubit_index]; } if ((y >> qubit_index) % 2) { src_y += 1ULL << qubit_order[qubit_index]; } } state_dst[y * dim + x] = state_src[src_y * dim + src_x]; } } } void dm_state_partial_trace_from_density_matrix(const UINT* target, UINT target_count, const CTYPE* state_src, CTYPE* state_dst, ITYPE dim) { ITYPE dst_dim = dim >> target_count; ITYPE trace_dim = 1ULL << target_count; UINT* sorted_target = create_sorted_ui_list(target, 
target_count); ITYPE* mask_list = create_matrix_mask_list(target, target_count); ITYPE y,x; for (y = 0; y < dst_dim; ++y) { for (x = 0; x < dst_dim; ++x) { ITYPE base_x = x; ITYPE base_y = y; for (UINT target_index = 0; target_index < target_count; ++target_index) { UINT insert_index = sorted_target[target_index]; base_x = insert_zero_to_basis_index(base_x, 1ULL << insert_index, insert_index); base_y = insert_zero_to_basis_index(base_y, 1ULL << insert_index, insert_index); } CTYPE val = 0.; for (ITYPE idx = 0; idx < trace_dim; ++idx) { ITYPE src_x = base_x ^ mask_list[idx]; ITYPE src_y = base_y ^ mask_list[idx]; val += state_src[src_y * dim + src_x]; } state_dst[y*dst_dim + x] = val; } } free(sorted_target); free(mask_list); } void dm_state_partial_trace_from_state_vector(const UINT* target, UINT target_count, const CTYPE* state_src, CTYPE* state_dst, ITYPE dim) { ITYPE dst_dim = dim >> target_count; ITYPE trace_dim = 1ULL << target_count; UINT* sorted_target = create_sorted_ui_list(target, target_count); ITYPE* mask_list = create_matrix_mask_list(target, target_count); ITYPE y, x; for (y = 0; y < dst_dim; ++y) { for (x = 0; x < dst_dim; ++x) { ITYPE base_x = x; ITYPE base_y = y; for (UINT target_index = 0; target_index < target_count; ++target_index) { UINT insert_index = sorted_target[target_index]; base_x = insert_zero_to_basis_index(base_x, 1ULL << insert_index, insert_index); base_y = insert_zero_to_basis_index(base_y, 1ULL << insert_index, insert_index); } CTYPE val = 0.; for (ITYPE idx = 0; idx < trace_dim; ++idx) { ITYPE src_x = base_x ^ mask_list[idx]; ITYPE src_y = base_y ^ mask_list[idx]; val += state_src[src_y] * conj(state_src[src_x]); } state_dst[y*dst_dim + x] = val; } } free(sorted_target); free(mask_list); }
omp_nest_lock.c
<ompts:test>
<ompts:testdescription>Test which checks the omp_set_nest_lock and the omp_unset_nest_lock function.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp_nest_lock</ompts:directive>
<ompts:dependences>omp flush</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"

/* Nestable lock under test; file-scope so the orphaned regions can reach it. */
omp_nest_lock_t lck;

int <ompts:testcode:functionname>omp_nest_lock</ompts:testcode:functionname>(FILE * logFile)
{
	/* count of threads currently inside the locked section; if the lock
	 * provides mutual exclusion this is always 0 when sampled below */
	int nr_threads_in_single = 0;
	int result = 0;          /* accumulates nonzero on any mutual-exclusion violation */
	int nr_iterations = 0;   /* total loop bodies executed across the team */
	int i;

	omp_init_nest_lock (&lck);

	#pragma omp parallel shared(lck)
	{
		#pragma omp for
		for(i = 0; i < LOOPCOUNT; i++)
		{
			<ompts:orphan>
			<ompts:check>omp_set_nest_lock (&lck);</ompts:check>
			</ompts:orphan>
			/* flushes make the counter updates visible across threads */
			#pragma omp flush
			nr_threads_in_single++;
			#pragma omp flush
			nr_iterations++;
			nr_threads_in_single--;
			/* nonzero here means another thread was inside concurrently */
			result = result + nr_threads_in_single;
			<ompts:orphan>
			<ompts:check>omp_unset_nest_lock (&lck);</ompts:check>
			</ompts:orphan>
		}
	}
	omp_destroy_nest_lock (&lck);

	/* pass: perfect exclusion and every iteration executed exactly once */
	return ((result == 0) && (nr_iterations == LOOPCOUNT));
}
</ompts:testcode>
</ompts:test>
opencl_odf_aes_fmt_plug.c
/* Modified by Dhiru Kholia <dhiru at openwall.com> for ODF AES format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_odf_aes; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_odf_aes); #else #include <string.h> #include "aes.h" #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "options.h" #include "common.h" #include "formats.h" #include "common-opencl.h" #include "sha2.h" #define FORMAT_LABEL "ODF-AES-opencl" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA256 OpenCL AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(odf_cpu_salt) typedef struct { uint32_t length; uint8_t v[32]; // hash of password } odf_password; typedef struct { uint32_t v[32/4]; } odf_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } odf_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; typedef struct { int cipher_type; int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int content_length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } odf_cpu_salt; static odf_cpu_salt *cur_salt; static struct fmt_tests tests[] = { 
{"$odf$*1*1*1024*32*61802eba18eab842de1d053809ba40927fd40b26c69ddeca6a8a652ed9c16a28*16*c5c0815b931f313627100d592a9c972f*16*e9a48b7daff738deaabe442007fb2ec4*0*be3b65ea09642c2b4fdc23e553e1f5304bc5df222b624c6373d53e674f5df01fdb8873cdab7a5a685fa45ad5441a9d8869401b7fa076c488ad53fd9971e97244ecc9416484450d4fb2ee4ec08af4044d7def937e6545dea2ce36bd5c57b1f46b11b9cf90c8fb3accff149ce2d54820b181b9124db9aac131f6436d77cf716423f04d42438eed6f9ca14bd24b9b17d3478176addd5fa0254bf986fccd879e326485790e28b94ad5306868734b5ac1b1ddb3f876382dee6e9428e8230e84bf11b7e85ccbae8b4b424cd73160c380f874b37fbe3c7e88c13ef4bde74b56507d17095c2c32bb8bcded0637e4403107bb33252f72f5886a91b7720fe32a8659a09c217717e4c74a7c2e09fc40b46aa288309a36e86b9f1856e1bce176bc9690555431e05c7b67ff95df64f8f40053079bfc9dda021ab2714fecf74398b867ebef675958f29eaa15eb631845e358a0c5caff0b824a2a69a6eabee069d3d6236d77709fd60438c9e3ad9e42b26810375e1e587eff105ac295327ef8bf66f6462388b7727ec32d6abde2f8d6126b185124bb437753663f6ab1f321ddfdb36d9f1f528729492e0b1bb8d3b9eda3c86c1997c92b902f5160f77587c37e45b5c133b5d9709fea910a2e9b54c0960b0ebc870cdbb858aabe07ed27cba86d29a7e64c6e3863131859314a14e64c1168d4a2d5ca0697853fb1fe969ba968e31359881d51edce287eff415de8e60cec2068bb82157fbcf0cf9a95e92cb23f32e6156daced4bee6ba8c8b41174d01fcd7662911bcc10d5b4478f8209ce3b91075d10529780be4f17e841a1f1833d432c3dc854908643e58b03c8860dfbc710a29f79f75ea262cfcef9cd67fb67d73f55b300d42f4577445af2b9f224620204cfb88de2cbf57931ac0e0f8d98259a41d744cad6a58abc7761c266f4e93aca19356b07073c09ae9d1976f4f2e1a76c350cc7764c27ae257eb69ba4213dd0a7794fa83d220439a398efd988b6dbf0de4c08bc3e4830c9e482b9e0fd1679f14e6f132cf06bae1d763dde7ce6f525ff9a0ebad28aeca16496194f2a6263a20e7afeb43d83c8c936130d6508f2bf68b5ca50375948424193a7fb1106fdf63ff72896e1b2633907f01a693218e3303436542bcf2af24cc4a41621c36768ce9a84d32cc9f3c2b108bfc78c25b1c2ea94e6e0d65406f78bdb8bc33c94a9550e5cc3e995cfbd31da03afb929418acdc89b099415f9bdb7dab7a75d44a696e14b031d601ad8d907e14a28044706c0c2955df2cb34ffea82af367e487b6cc928dc87a33fc7555
173e7faa5cfd1af6d3d6f496f23a9579db22dd4a2c16e950fdc90696d95a81183765a4fbddb42c488d40ac1de28483cf1cdddf821d3f859c57b13cb7f21a916bd0d89438a17634c68637f23e2544589e8ae5ee5bced91680c087cb3105cd74a09e88d3aae17d75e", "test"}, /* CMIYC 2013 "pro" hard hash */ {"$odf$*1*1*1024*32*7db40092b3857fa319bc0d717b60cefc40b1d51ef92ebc893c518ffebffdf200*16*5f7c8ab6e5d1c41dbd23c384fee957ed*16*9ff092f2dd29dab6ce5fb43ad7bbdd5a*0*bac8343436715b40aaf4690a7dc57b0f82b8f25f8ad0f9833e32468410d4dd02e387a067872b5847adc9a276c86a03113e11b903854202eec361c5b7ba74bcb254a4f76d97ca45dbe30fe49f78ce9cf7df0246ae4524b8f13ad28357838559c116d9ed59267f4df91da3ea9758c132e2ebc40fd4ee8e9978921a0847d7ca5c30ef911e0b88f9fc84039633eacf5e023c82dd1a573abd7663b8f36a039d42ed91b4a0665902f174be8cefefd367ba9b5da95768550e567242f1b2e2c3866eb8aa3c12d0b34277929616319ea29dd9a3b9addb963d45c7d4c2b54a99b0c1cf24cac3e981ed4e178e621938b83be30f54d37d6425a0b7ac9dff5504830fe1d1f136913c32d8f732eb55e6179ad2699fd851af3a44f8ca914117344e6fadf501bf6f6e0ae7970a2b58eb3af0d89c78411c6adde8aa1f0e8b69c261fd04835cdc3ddf0a6d67ddff33995b5cc7439db83f90c8a2e07e2513771fffcf8b55ce1a382b14ffbf22be9bdd6f83a9b7602995c9793dfffb32c9eb16930c0bb55e5a8364fa06a59fca5af27df4a02565db2b4718ed44405f67a052738692c189039a7fd63713207616eeeebace3c0a3963dd882c485523f49fa0bc2663fc6ef090a220dd5c6554bc0702da8c3122383ea8a009837d549d58ad688c9cc4b8461fe70f4600539cd1d82edd4e110b1c1472dae40adc3126e2a09dd2753dcd83799841745160e235652f601d1257268321f22d19bd9dc811afaf143765c7cb53717ea329e9e4064a3cf54b33d006e93b83102e2ad3327f6d995cb598bd96466b1287e6da9967f4f034c63fd06c6e5c7ec25008c122385f271d18918cff3823f9fbdb37791e7371ce1d6a4ab08c12eca5fceb7c9aa7ce25a8bd640a68c622ddd858973426cb28e65c4c3421b98ebf4916b8c2bfe71b2afec4ab2f99291a4c4d3312521850d46436aecd9e2e93a8619dbc3c1caf4507bb488ce921cd8d13a1640e6c49403e0416924b3b1a01c9939c7bcdec50f057d6f4dccf0afc8c2ad37c4f8429c77cf19ad49db5e5219e965a3ed5d56d799689bd93642602d7959df0493ea62cccff83e66d85bf45d6b5b03e8cfca84daf37ecfccb60f85f3c5102900a02a5df015
b1bf1ef55dfb2ab20321bcf3325d1adce22d4456837dcc589ef36d4f06ccdcc96ef10ff806d76f0044e92e192b946ae0f09860a38c2a6052fe84c3e9bb9380e2b344812376c6bbd5c9858745dbd072798a3d7eff31ae5d509c11b5269ec6f2108cb6e72a5ab495ea7aed5bf3dabedbb517dc4ceff818a8e890a6ea9a91bab37e8a463a9d04993c5ba7e40e743e033842540806d4a65258d0f4d5988e1e0011f0e85fcae3b2819c1f17f5c7980ecd87aee425cdab4f34bfb7a31ee7936c60f2f4f52aea67aef4736a419dc9c559279b569f61995eb2d6b7c204c3e9f56ca5c8a889812a30c33", "juNK^r00M!"}, {NULL} }; static cl_int cl_error; static odf_password *inbuffer; static odf_hash *outbuffer; static odf_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; size_t insize, outsize, settingsize, cracked_size; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(odf_password) * gws; outsize = sizeof(odf_hash) * gws; settingsize = sizeof(odf_salt); cracked_size = sizeof(*crypt_out) * gws; inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(gws, sizeof(*saved_key)); crypt_out = mem_calloc(1, cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, 
sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (crypt_out) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(odf_password), 0, db); // Auto tune execution from shared/included code. 
	/* Tail of a function whose opening lines lie above this chunk
	 * (autotune setup); kept verbatim. */
	autotune_run(self, 1, 0, 1000);
	}
}

/* Sanity-check a "$odf$*..." ciphertext line field by field.
 * Returns 1 when the hash is well formed, 0 otherwise.  A truncated
 * .pot-file line is accepted as-is. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;

	if (strncmp(ciphertext, "$odf$*", 6))
		return 0;
	/* handle 'chopped' .pot lines */
	if (ldr_isa_pot_source(ciphertext))
		return 1;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 6;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 1) {	/* only cipher type 1 is handled by this format */
		goto err;
	}
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	/* checksum hex string must match the key size parsed above */
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res > 16)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res > 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* something */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	res = strlen(p);
	/* content is hex: even length, bounded by the 1024-byte buffer */
	if (res > 2048 || res & 1)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse the ciphertext into the static odf_cpu_salt record.
 * Assumes valid() has already accepted the line, so no field checks
 * are repeated here.  Returns a pointer to static storage. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static odf_cpu_salt cs;

	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	p = strtokm(NULL, "*");
	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	/* decode hex iv into bytes */
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	memset(cs.content, 0, sizeof(cs.content));
	/* decode the encrypted content, at most 1024 bytes */
	for (i = 0; p[i * 2] && i < 1024; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	cs.content_length = i;
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Extract the stored checksum (the 5th '*'-separated field) as the
 * binary to compare against; returns static storage. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;

	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtokm(ctcopy, "*");
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}

/* Stash the current salt and push the PBKDF2 parameters to the GPU. */
static void set_salt(void *salt)
{
	cur_salt = (odf_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;
	currentsalt.skip_bytes = 0;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}

/* Partial-hash accessors used by the cracker's hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

#undef set_key
/* Copy a candidate password (truncated to PLAINTEXT_LENGTH) into the
 * per-index key buffer. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Hash all candidates: SHA-256 the password on the CPU, run PBKDF2 on
 * the GPU, then AES-CBC-decrypt the content and SHA-256 it on the CPU. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(index = 0; index < count; index++)
	{
		unsigned char hash[32];
		SHA256_CTX ctx;

		SHA256_Init(&ctx);
		SHA256_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
		SHA256_Final((unsigned char *)hash, &ctx);
		memcpy(inbuffer[index].v, hash, 32);
		inbuffer[index].length = 32;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
		"Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(index = 0; index < count; index++)
	{
		AES_KEY akey;
		unsigned char iv[32];
		SHA256_CTX ctx;
		unsigned char pt[1024];

		/* fresh IV per candidate: AES_cbc_encrypt mutates it */
		memcpy(iv, cur_salt->iv, 32);
		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_decrypt_key((unsigned char*)outbuffer[index].v, 256, &akey);
		AES_cbc_encrypt(cur_salt->content, pt, cur_salt->content_length,
			&akey, iv, AES_DECRYPT);
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, pt, cur_salt->content_length);
		SHA256_Final((unsigned char*)crypt_out[index], &ctx);
	}

	return count;
}

/* Quick scan: compares only the first ARCH_SIZE bytes; cmp_one() does
 * the full BINARY_SIZE comparison. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/*
 * The format tests all have iteration count 1024.
 * Just in case the iteration count is tunable, let's report it.
 */
static unsigned int iteration_count(void *salt)
{
	/* NOTE(review): get_salt() stores an odf_cpu_salt, but the pointer
	 * is read here through odf_salt; this relies on 'iterations'
	 * sitting at the same offset in both structs — confirm. */
	odf_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations;
}

/* Format registration table consumed by the JtR core. */
struct fmt_main fmt_opencl_odf_aes = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		4,
		SALT_SIZE,
		4,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
SoaDistanceTableABOMP.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_OMP_H
#define QMCPLUSPLUS_DTDIMPL_AB_OMP_H

#include "OpenMP/OMPallocator.hpp"
#include "Platforms/PinnedAllocator.h"
#include "Particle/RealSpacePositionsOMP.h"

namespace qmcplusplus
{
/**@ingroup nnlist
 * @brief A derived class from DistanceTableData, specialized for AB using a transposed form,
 *        computing distances/displacements on an accelerator via OpenMP target offload.
 */
template<typename T, unsigned D, int SC>
class SoaDistanceTableABOMP : public DTD_BConds<T, D, SC>, public DistanceTableData
{
private:
  template<typename DT>
  using OffloadPinnedVector = Vector<DT, OMPallocator<DT, PinnedAlignedAllocator<DT>>>;

  ///accelerator output array for multiple walkers, N_targets x N_sources_padded x (D+1) (distances, displacements)
  OffloadPinnedVector<RealType> offload_output;
  ///accelerator input array for a list of target particle positions, N_targets x D
  OffloadPinnedVector<RealType> target_pos;
  ///accelerator input buffer for multiple data set
  OffloadPinnedVector<char> offload_input;
  ///accelerator output buffer for r and dr
  OffloadPinnedVector<RealType> r_dr_memorypool_;
  ///target particle id
  std::vector<int> particle_id;
  ///device pointer of r_dr_memorypool_
  RealType* r_dr_device_ptr_;

  /// timer for offload portion
  NewTimer& offload_timer_;
  /// timer for copy portion
  NewTimer& copy_timer_;
  /// timer for evaluation portion
  NewTimer& eval_timer_;

public:
  /** Constructor: requires the source set to carry RealSpacePositionsOMP
   *  coordinates (checked via dynamic_cast), then sizes all buffers and
   *  maps this object to the device. */
  SoaDistanceTableABOMP(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice),
        DistanceTableData(source, target),
        r_dr_device_ptr_(nullptr),
        offload_timer_(*timer_manager.createTimer(std::string("SoaDistanceTableABOMP::offload_") + target.getName() +
                                                      "_" + source.getName(),
                                                  timer_level_fine)),
        copy_timer_(*timer_manager.createTimer(std::string("SoaDistanceTableABOMP::copy_") + target.getName() + "_" +
                                                   source.getName(),
                                               timer_level_fine)),
        eval_timer_(*timer_manager.createTimer(std::string("SoaDistanceTableABOMP::evaluate_") + target.getName() +
                                                   "_" + source.getName(),
                                               timer_level_fine))
  {
    auto* coordinates_soa = dynamic_cast<const RealSpacePositionsOMP*>(&source.getCoordinates());
    if (!coordinates_soa)
      throw std::runtime_error("Source particle set doesn't have OpenMP offload. Contact developers!");
    resize(source.getTotalNum(), target.getTotalNum());
    #pragma omp target enter data map(to:this[:1])
  }

  /** (Re)allocate the pooled r/dr storage and attach per-target views.
   *  Each target gets a stride of N_sources_padded*(D+1) reals:
   *  distances first, then the D displacement components. */
  void resize(int ns, int nt)
  {
    N_sources = ns;
    N_targets = nt;
    if (N_sources * N_targets == 0)
      return;

    // initialize memory containers and views
    const int N_sources_padded = getAlignedSize<T>(N_sources);
    const int stride_size      = N_sources_padded * (D + 1);
    r_dr_memorypool_.resize(stride_size * N_targets);
    auto* pool_ptr = r_dr_memorypool_.data();
    // capture the device-side address of the pool for later per-target output pointers
    #pragma omp target data use_device_ptr(pool_ptr)
    {
      r_dr_device_ptr_ = pool_ptr;
    }

    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].attachReference(r_dr_memorypool_.data() + i * stride_size, N_sources);
      displacements_[i].attachReference(N_sources, N_sources_padded,
                                        r_dr_memorypool_.data() + i * stride_size + N_sources_padded);
    }

    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(N_sources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableABOMP()                             = delete;
  SoaDistanceTableABOMP(const SoaDistanceTableABOMP&) = delete;

  ~SoaDistanceTableABOMP()
  {
    #pragma omp target exit data map(delete:this[:1])
  }

  /** evaluate the full table */
  inline void evaluate(ParticleSet& P)
  {
    ScopedTimer eval(&eval_timer_);

    // be aware of the sign of Displacement
    const int N_targets_local  = N_targets;
    const int N_sources_local  = N_sources;
    const int N_sources_padded = getAlignedSize<T>(N_sources);

    target_pos.resize(N_targets * D);
    for (size_t iat = 0; iat < N_targets; iat++)
      for (size_t idim = 0; idim < D; idim++)
        target_pos[iat * D + idim] = P.R[iat][idim];

    auto* target_pos_ptr = target_pos.data();
    auto* source_pos_ptr = Origin->getCoordinates().getAllParticlePos().data();
    auto* r_dr_ptr       = r_dr_memorypool_.data();

    // To maximize thread usage, the loop over electrons is chunked. Each chunk is sent to an OpenMP offload thread team.
    const int ChunkSizePerTeam = 256;
    const int num_teams        = (N_sources + ChunkSizePerTeam - 1) / ChunkSizePerTeam;

    {
      ScopedTimer offload(&offload_timer_);
      #pragma omp target teams distribute collapse(2) num_teams(N_targets*num_teams) \
        map(to: source_pos_ptr[:N_sources_padded*D]) \
        map(always, to: target_pos_ptr[:N_targets*D]) \
        map(always, from: r_dr_ptr[:r_dr_memorypool_.size()])
      for (int iat = 0; iat < N_targets_local; ++iat)
        for (int team_id = 0; team_id < num_teams; team_id++)
        {
          const int first = ChunkSizePerTeam * team_id;
          const int last  = (first + ChunkSizePerTeam) > N_sources_local ? N_sources_local : first + ChunkSizePerTeam;

          T pos[D];
          for (int idim = 0; idim < D; idim++)
            pos[idim] = target_pos_ptr[iat * D + idim];

          const size_t stride_size = N_sources_padded * (D + 1);
          auto* r_iat_ptr  = r_dr_ptr + iat * stride_size;
          auto* dr_iat_ptr = r_iat_ptr + N_sources_padded;

          DTD_BConds<T, D, SC>::computeDistancesOffload(pos, source_pos_ptr, r_iat_ptr, dr_iat_ptr, N_sources_padded,
                                                        first, last);
        }
    }
  }

  /** It has two implementation mw_evaluate_transfer_inplace and mw_evaluate_fuse_transfer with different D2H memory transfer schemes.
   * Eventually, there will be only one version without any transfer and solve the dilemma.
   */
  inline void mw_evaluate(const RefVector<DistanceTableData>& dt_list, const RefVector<ParticleSet>& p_list)
  {
    ScopedTimer eval(&eval_timer_);
    mw_evaluate_fuse_transfer(dt_list, p_list);
  }

  /** this function implements mw_evaluate.
   * After offloading the computation of distances and displacements, the per-walker result is transferred back walker by walker in place.
   * The runtime overhead is very high for small problem size with many walkers.
   */
  inline void mw_evaluate_transfer_inplace(const RefVector<DistanceTableData>& dt_list,
                                           const RefVector<ParticleSet>& p_list)
  {
    const size_t nw = dt_list.size();

    size_t count_targets = 0;
    for (ParticleSet& p : p_list)
      count_targets += p.getTotalNum();
    const size_t total_targets = count_targets;

    // This is horrible optimization putting different data types in a single buffer but allows a single H2D transfer
    // Layout: [positions: total_targets*D reals][walker ids: total_targets ints]
    //         [source device ptrs: nw][per-target output device ptrs: total_targets]
    constexpr size_t realtype_size = sizeof(RealType);
    constexpr size_t int_size      = sizeof(int);
    constexpr size_t ptr_size      = sizeof(RealType*);
    offload_input.resize(total_targets * D * realtype_size + total_targets * int_size +
                         (nw + total_targets) * ptr_size);
    auto target_positions = reinterpret_cast<RealType*>(offload_input.data());
    auto walker_id_ptr    = reinterpret_cast<int*>(offload_input.data() + total_targets * D * realtype_size);
    auto source_ptrs      = reinterpret_cast<RealType**>(offload_input.data() + total_targets * D * realtype_size +
                                                    total_targets * int_size);
    auto output_ptrs      = reinterpret_cast<RealType**>(offload_input.data() + total_targets * D * realtype_size +
                                                    total_targets * int_size + nw * ptr_size);

    const int N_sources_padded = getAlignedSize<T>(N_sources);
    offload_output.resize(total_targets * N_sources_padded * (D + 1));

    count_targets = 0;
    for (size_t iw = 0; iw < nw; iw++)
    {
      auto& dt = static_cast<SoaDistanceTableABOMP&>(dt_list[iw].get());
      ParticleSet& pset(p_list[iw]);

      assert(N_sources == dt.N_sources);
      auto& RSoA_OMP  = static_cast<const RealSpacePositionsOMP&>(dt.Origin->getCoordinates());
      source_ptrs[iw] = const_cast<RealType*>(RSoA_OMP.getDevicePtr());

      for (size_t iat = 0; iat < pset.getTotalNum(); ++iat, ++count_targets)
      {
        for (size_t idim = 0; idim < D; idim++)
          target_positions[count_targets * D + idim] = pset.R[iat][idim];
        walker_id_ptr[count_targets] = iw;
        // each target writes straight into its walker's device-resident pool
        output_ptrs[count_targets]   = dt.r_dr_device_ptr_ + iat * N_sources_padded * (D + 1);
      }
    }

    const int N_sources_local = N_sources;
    // To maximize thread usage, the loop over electrons is chunked. Each chunk is sent to an OpenMP offload thread team.
    const int ChunkSizePerTeam = 256;
    const int num_teams        = (N_sources + ChunkSizePerTeam - 1) / ChunkSizePerTeam;

    auto* input_ptr = offload_input.data();

    {
      ScopedTimer offload(&offload_timer_);
      // async kernel; the D2H updates below depend on total_targets to order after it
      #pragma omp target teams distribute collapse(2) num_teams(total_targets*num_teams) \
        map(always, to: input_ptr[:offload_input.size()]) \
        nowait depend(out: total_targets)
      for (int iat = 0; iat < total_targets; ++iat)
        for (int team_id = 0; team_id < num_teams; team_id++)
        {
          auto* target_pos_ptr = reinterpret_cast<RealType*>(input_ptr);
          const int walker_id  = reinterpret_cast<int*>(input_ptr + total_targets * D * realtype_size)[iat];
          auto* source_pos_ptr = reinterpret_cast<RealType**>(input_ptr + total_targets * D * realtype_size +
                                                              total_targets * int_size)[walker_id];
          auto* r_iat_ptr      = reinterpret_cast<RealType**>(input_ptr + total_targets * D * realtype_size +
                                                         total_targets * int_size + nw * ptr_size)[iat];
          auto* dr_iat_ptr     = r_iat_ptr + N_sources_padded;

          const int first = ChunkSizePerTeam * team_id;
          const int last  = (first + ChunkSizePerTeam) > N_sources_local ? N_sources_local : first + ChunkSizePerTeam;

          T pos[D];
          for (int idim = 0; idim < D; idim++)
            pos[idim] = target_pos_ptr[iat * D + idim];

          DTD_BConds<T, D, SC>::computeDistancesOffload(pos, source_pos_ptr, r_iat_ptr, dr_iat_ptr, N_sources_padded,
                                                        first, last);
        }
    }

    {
      ScopedTimer copy(&copy_timer_);
      for (size_t iw = 0; iw < nw; iw++)
      {
        auto& dt       = static_cast<SoaDistanceTableABOMP&>(dt_list[iw].get());
        auto* pool_ptr = dt.r_dr_memorypool_.data();
        #pragma omp target update from(pool_ptr[:dt.r_dr_memorypool_.size()]) nowait depend(inout: total_targets)
      }
      #pragma omp taskwait
    }
  }

  /** this function implements mw_evaluate.
   * After offloading the computation of distances and displacements, the result for all the walkers is transferred back together in one shot
   * and then copied to per-walker data structure. Memory copy on the CPU is still costly and not beneficial for large problem size with a few walkers.
   */
  inline void mw_evaluate_fuse_transfer(const RefVector<DistanceTableData>& dt_list,
                                        const RefVector<ParticleSet>& p_list)
  {
    const size_t nw = dt_list.size();

    size_t count_targets = 0;
    for (ParticleSet& p : p_list)
      count_targets += p.getTotalNum();
    const size_t total_targets = count_targets;

    // This is horrible optimization putting different data types in a single buffer but allows a single H2D transfer
    const size_t realtype_size = sizeof(RealType);
    const size_t int_size      = sizeof(int);
    const size_t ptr_size      = sizeof(RealType*);
    offload_input.resize(total_targets * D * realtype_size + total_targets * int_size + nw * ptr_size);
    auto target_positions = reinterpret_cast<RealType*>(offload_input.data());
    auto walker_id_ptr    = reinterpret_cast<int*>(offload_input.data() + total_targets * D * realtype_size);
    auto source_ptrs      = reinterpret_cast<RealType**>(offload_input.data() + total_targets * D * realtype_size +
                                                    total_targets * int_size);

    particle_id.resize(total_targets);

    const int N_sources_padded = getAlignedSize<T>(N_sources);
    offload_output.resize(total_targets * N_sources_padded * (D + 1));

    count_targets = 0;
    for (size_t iw = 0; iw < nw; iw++)
    {
      auto& dt = static_cast<SoaDistanceTableABOMP&>(dt_list[iw].get());
      ParticleSet& pset(p_list[iw]);

      assert(N_sources == dt.N_sources);
      auto& RSoA_OMP  = static_cast<const RealSpacePositionsOMP&>(dt.Origin->getCoordinates());
      source_ptrs[iw] = const_cast<RealType*>(RSoA_OMP.getDevicePtr());

      for (size_t iat = 0; iat < pset.getTotalNum(); ++iat, ++count_targets)
      {
        for (size_t idim = 0; idim < D; idim++)
          target_positions[count_targets * D + idim] = pset.R[iat][idim];
        walker_id_ptr[count_targets] = iw;
        particle_id[count_targets]   = iat;
      }
    }

    const int N_sources_local = N_sources;
    // To maximize thread usage, the loop over electrons is chunked. Each chunk is sent to an OpenMP offload thread team.
    const int ChunkSizePerTeam = 256;
    const int num_teams        = (N_sources + ChunkSizePerTeam - 1) / ChunkSizePerTeam;

    auto* r_dr_ptr  = offload_output.data();
    auto* input_ptr = offload_input.data();

    {
      ScopedTimer offload(&offload_timer_);
      #pragma omp target teams distribute collapse(2) num_teams(total_targets*num_teams) \
        map(always, to: input_ptr[:offload_input.size()]) \
        map(always, from: r_dr_ptr[:offload_output.size()])
      for (int iat = 0; iat < total_targets; ++iat)
        for (int team_id = 0; team_id < num_teams; team_id++)
        {
          auto* target_pos_ptr = reinterpret_cast<RealType*>(input_ptr);
          const int walker_id  = reinterpret_cast<int*>(input_ptr + total_targets * D * realtype_size)[iat];
          auto* source_pos_ptr = reinterpret_cast<RealType**>(input_ptr + total_targets * D * realtype_size +
                                                              total_targets * int_size)[walker_id];
          auto* r_iat_ptr      = r_dr_ptr + iat * N_sources_padded * (D + 1);
          auto* dr_iat_ptr     = r_dr_ptr + iat * N_sources_padded * (D + 1) + N_sources_padded;

          const int first = ChunkSizePerTeam * team_id;
          const int last  = (first + ChunkSizePerTeam) > N_sources_local ? N_sources_local : first + ChunkSizePerTeam;

          T pos[D];
          for (int idim = 0; idim < D; idim++)
            pos[idim] = target_pos_ptr[iat * D + idim];

          DTD_BConds<T, D, SC>::computeDistancesOffload(pos, source_pos_ptr, r_iat_ptr, dr_iat_ptr, N_sources_padded,
                                                        first, last);
        }
    }

    {
      ScopedTimer copy(&copy_timer_);
      // scatter the fused device output back into each walker's table
      for (size_t iat = 0; iat < total_targets; iat++)
      {
        const int wid = walker_id_ptr[iat];
        const int pid = particle_id[iat];
        auto& dt      = static_cast<SoaDistanceTableABOMP&>(dt_list[wid].get());
        assert(N_sources_padded == dt.displacements_[pid].capacity());
        auto offset = offload_output.data() + iat * N_sources_padded * (D + 1);
        std::copy_n(offset, N_sources_padded, dt.distances_[pid].data());
        std::copy_n(offset + N_sources_padded, N_sources_padded * D, dt.displacements_[pid].data());
      }
    }
  }

  ///evaluate the temporary pair relations
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old)
  {
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(), temp_dr_,
                                           0, N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                             distances_[iat].data(), displacements_[iat], 0, N_sources);
  }

  ///update the stripe for jat-th particle
  inline void update(IndexType iat, bool partial_update)
  {
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  /** Collect all sources within rcut of target iat into the compact lists
   *  jid/dist/displ; returns the number of neighbors found.
   *  NOTE(review): the loop runs over N_targets while indexing
   *  distances_[jat][iat] — relies on the transposed AB layout; confirm
   *  against the base-class documentation. */
  size_t get_neighbors(int iat, RealType rcut, int* restrict jid, RealType* restrict dist,
                       PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        jid[nn]   = jat;
        dist[nn]  = rij;
        // flip the sign: stored displacement points the other way
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  /** Return the index of the nearest source to target iat (or -1 when
   *  N_sources is zero), filling r/dr; uses the temporary arrays when
   *  newpos is true (i.e. after move()). */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index         = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  /** Distance-only variant of get_neighbors(). */
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }
};
} // namespace qmcplusplus
#endif
crivo.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <omp.h> #define NUM_THREADS 1 int main(int argc, char *argv[]) { long long n; int raizN; double inicio, fim; long long primos = 0; int *array; // 0 = é primo. 1 = não é primo omp_set_num_threads(NUM_THREADS); if (argc < 2) { printf("Entre com o valor de n.\n"); return 1; } else { n = strtol(argv[1], (char**) NULL, 10); } array = (int*) calloc(n, sizeof(int)); array[0] = 1; array[1] = 1; raizN = ((int) sqrt(n)); inicio = omp_get_wtime(); for (long long i = 2; i <= raizN; i++) { if (array[i] == 0) { #pragma omp parallel for schedule(static) // static, pois todos terão a mesma complexidade for (long long j = 2*i; j < n; j+=i) { #pragma omp atomic write array[j] = 1; } } } #pragma omp parallel for reduction(+:primos) for (long long i = 0; i < n; i++) { if (array[i] == 0) primos++; } fim = omp_get_wtime(); printf("Threads: %d\tTempo de execução: %.3f\tN: %lld\tPrimos: %lld\n", NUM_THREADS, fim-inicio, n, primos); }
GB_binop__isle_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__isle_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_uint16)
// A*D function (colscale):         GB (_AxD__isle_uint16)
// D*A function (rowscale):         GB (_DxB__isle_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_uint16)
// C=scalar+B                       GB (_bind1st__isle_uint16)
// C=scalar+B'                      GB (_bind1st_tran__isle_uint16)
// C=A+scalar                       GB (_bind2nd__isle_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__isle_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: ISLE returns 1/0 in the uint16_t result type
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_UINT16 || GxB_NO_ISLE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isle_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code below
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
calculate_E_field_flat_all_in_one.h
/*
 * HLLE_solve: Harten-Lax-van Leer-Einfeldt approximate Riemann flux.
 * Eq. 3.15 of https://epubs.siam.org/doi/abs/10.1137/1025002?journalCode=siread
 *   F_HLLE = (c_min F_R + c_max F_L - c_min c_max (U_R-U_L)) / (c_min + c_max)
 * With both characteristic speeds hard-coded to 1 (flat-space light-speed
 * bound) the formula reduces to the expression returned below.
 */
REAL HLLE_solve(REAL F0B1_r, REAL F0B1_l, REAL U_r, REAL U_l) {
  // Eq. 3.15 of https://epubs.siam.org/doi/abs/10.1137/1025002?journalCode=siread
  // F_HLLE = (c_min F_R + c_max F_L - c_min c_max (U_R-U_L)) / (c_min + c_max)
  return 0.5*(F0B1_r+F0B1_l-(U_r-U_l)); // FIXME: Curved space implementation!
}

/*
Calculate the electric flux on both faces in the input direction.
The "count" mentioned below is an integer that is either 0 or 1 and enters
this routine through SIGN: count=0 means the components are input in the
order of a backwards permutation and the results are multiplied by -1.0
(SIGN=-1.0); count=1 means the permutation is forwards (SIGN=+1.0).
*/
void calculate_E_field_flat_all_in_one(const paramstruct *params,
                                       const REAL *Vr0,const REAL *Vr1,
                                       const REAL *Vl0,const REAL *Vl1,
                                       const REAL *Br0,const REAL *Br1,
                                       const REAL *Bl0,const REAL *Bl1,
                                       const REAL *Brflux_dirn, const REAL *Blflux_dirn,
                                       REAL *A2_rhs,const REAL SIGN,const int flux_dirn) {
    // FIXME: include metric functions!
    // This function is written to be generic and compute the contribution for all three AD RHSs.
    // However, for convenience, the notation used in the function itself is for the contribution
    // to AD2, specifically the [F_HLL^x(B^y)]_z term, with reconstructions in the x direction. This
    // corresponds to flux_dirn=0 and count=1 (which corresponds to SIGN=+1.0).
    // Thus, Az(i,j,k) += 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k)) are solved here.
    // The other terms are computed by cyclically permuting the indices when calling this function.
#include "GiRaFFE_standalone_Ccodes/set_Cparameters.h"

    // Loop over the grid interior only (NGHOSTS ghost zones on each side).
    #pragma omp parallel for
    for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) {
        for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) {
            for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++) {
                // First, we set the index from which we will read memory. indexp1 is incremented by
                // one point in the direction of reconstruction. These correspond to the faces at
                // i-1/2 and i+1/2, respectively.
                // Now, we read in memory. We need the x and y components of velocity and magnetic field on both
                // the left and right sides of the interface at *both* faces.
                // Here, the point (i0,i1,i2) corresponds to the point (i-1/2,j,k)
                const int index = IDX3S(i0,i1,i2);
                const double Valenciav_rU0 = Vr0[index];
                const double Valenciav_rU1 = Vr1[index];
                const double B_rU0 = Br0[index];
                const double B_rU1 = Br1[index];
                const double B_rflux_dirn = Brflux_dirn[index];
                const double Valenciav_lU0 = Vl0[index];
                const double Valenciav_lU1 = Vl1[index];
                const double B_lU0 = Bl0[index];
                const double B_lU1 = Bl1[index];
                const double B_lflux_dirn = Blflux_dirn[index];

                // *******************************
                // REPEAT ABOVE, but at i+1, which corresponds to point (i+1/2,j,k).
                // Exactly one of the (flux_dirn==N) comparisons is 1, so indexp1
                // advances one grid point in the reconstruction direction only.
                // Recall that the documentation here assumes flux_dirn==0, but the
                // algorithm is generalized so that any flux_dirn or velocity/magnetic
                // field component can be computed via permuting the inputs into this
                // function.
                const int indexp1 = IDX3S(i0+(flux_dirn==0),i1+(flux_dirn==1),i2+(flux_dirn==2));
                const double Valenciav_rU0_p1 = Vr0[indexp1];
                const double Valenciav_rU1_p1 = Vr1[indexp1];
                const double B_rU0_p1 = Br0[indexp1];
                const double B_rU1_p1 = Br1[indexp1];
                const double B_rflux_dirn_p1 = Brflux_dirn[indexp1];
                const double Valenciav_lU0_p1 = Vl0[indexp1];
                const double Valenciav_lU1_p1 = Vl1[indexp1];
                const double B_lU0_p1 = Bl0[indexp1];
                const double B_lU1_p1 = Bl1[indexp1];
                const double B_lflux_dirn_p1 = Blflux_dirn[indexp1];
                // *******************************

                // DEBUGGING:
                // if(flux_dirn==0 && SIGN>0 && i1==Nxx_plus_2NGHOSTS1/2 && i2==Nxx_plus_2NGHOSTS2/2) {
                //     printf("index=%d & indexp1=%d\n",index,indexp1);
                // }

                // Since we are computing A_z, the relevant equation here is:
                //   -E_z(x_i,y_j,z_k) = 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k)
                //                             -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k) )
                // We will construct the above sum one half at a time, first with SIGN=+1, which
                // corresponds to flux_dirn = 0, count=1, and takes care of the terms:
                //   [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k)
                // ( Note that we will repeat the above with flux_dirn = 1, count = 0, with SIGN=-1
                //   AND with the input components switched (x->y,y->x) so that we get the term
                //   -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k)
                //   thus completing the above sum. )
                // Here, [F_HLL^i(B^j)]_k = (v^i B^j - v^j B^i) in general.

                // Calculate the flux vector on each face for each component of the E-field:
                // The F(B) terms are as Eq. 6 in Giacomazzo: https://arxiv.org/pdf/1009.2468.pdf
                //   [F^i(B^j)]_k = \sqrt{\gamma} (v^i B^j - v^j B^i)
                // Therefore since we want [F_HLL^x(B^y)]_z,
                // we will code (v^x B^y - v^y B^x) on both left and right faces.
                const REAL F0B1_r = (Valenciav_rU0*B_rU1 - Valenciav_rU1*B_rU0);
                const REAL F0B1_l = (Valenciav_lU0*B_lU1 - Valenciav_lU1*B_lU0);

                // Compute the state vector for this flux direction.
                // We must also multiply by sign so that we use the positive for the forward permutation
                // and negative for the backwards permutation. For Az, that means that we add +By and -Bx,
                // exactly as is done in the original GiRaFFE's A_i_rhs_no_gauge_terms.C, in line with
                // Del Zanna, 2003 [https://arxiv.org/pdf/astro-ph/0210618.pdf], Eq. 44
                const REAL U_r = B_rflux_dirn; //B_rU0;
                const REAL U_l = B_lflux_dirn;
                // Basic HLLE solver:
                const REAL FHLL_0B1 = HLLE_solve(F0B1_r, F0B1_l, U_r, U_l);

                // ************************************
                // ************************************
                // REPEAT ABOVE, but at point i+1
                // Calculate the flux vector on each face for each component of the E-field:
                const REAL F0B1_r_p1 = (Valenciav_rU0_p1*B_rU1_p1 - Valenciav_rU1_p1*B_rU0_p1);
                const REAL F0B1_l_p1 = (Valenciav_lU0_p1*B_lU1_p1 - Valenciav_lU1_p1*B_lU0_p1);
                // Compute the state vector for this flux direction
                const REAL U_r_p1 = B_rflux_dirn_p1;
                const REAL U_l_p1 = B_lflux_dirn_p1;
                //const REAL U_r_p1 = B_rU1_p1;
                //const REAL U_l_p1 = B_lU1_p1;
                // Basic HLLE solver, but at the next point:
                const REAL FHLL_0B1p1 = HLLE_solve(F0B1_r_p1, F0B1_l_p1, U_r_p1, U_l_p1);
                // ************************************
                // ************************************

                // With the Riemann problem solved, we add the contributions to the RHSs:
                //   -E_z(x_i,y_j,z_k) &= 0.25 ( [F_HLL^x(B^y)]_z(i+1/2,j,k)+[F_HLL^x(B^y)]_z(i-1/2,j,k)
                //                              -[F_HLL^y(B^x)]_z(i,j+1/2,k)-[F_HLL^y(B^x)]_z(i,j-1/2,k) )
                // (Eq. 11 in https://arxiv.org/pdf/1009.2468.pdf)
                // This code, as written, solves the first two terms for flux_dirn=0. Calling this function for count=1
                // flips x for y to solve the latter two, switching to SIGN=-1 as well.

                // Here, we finally add together the output of the HLLE solver at i-1/2 and i+1/2
                // We also multiply by the SIGN dictated by the order of the input vectors and divide by 4.
                A2_rhs[index] += SIGN*0.25*(FHLL_0B1 + FHLL_0B1p1);
                //    flux dirn = 0  =====================>   i-1/2       i+1/2
                // Eq 11 in Giacomazzo:
                //   -FxBy(avg over i-1/2 and i+1/2) + FyBx(avg over j-1/2 and j+1/2)
                // Eq 6 in Giacomazzo: FxBy = vxBy - vyBx
                //  -> FHLL_0B1 = vyBx - vxBy
            } // END LOOP: for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++)
        } // END LOOP: for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++)
    } // END LOOP: for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++)
}
prepress.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS % % P P R R E P P R R E SS SS % % PPPP RRRR EEE PPPP RRRR EEE SSS SSS % % P R R E P R R E SS SS % % P R R EEEEE P R R EEEEE SSSSS SSSSS % % % % % % MagickCore Prepress Methods % % % % Software Design % % Cristy % % October 2001 % % % % % % Copyright @ 2001 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/prepress.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T o t a l I n k D e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageTotalInkDensity() returns the total ink density for a CMYK image. 
% Total Ink Density (TID) is determined by adding the CMYK values in the % darkest shadow area in an image. % % The format of the GetImageTotalInkDensity method is: % % double GetImageTotalInkDensity(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport double GetImageTotalInkDensity(Image *image, ExceptionInfo *exception) { CacheView *image_view; double total_ink_density; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return(0.0); } status=MagickTrue; total_ink_density=0.0; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double density; const Quantum *p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { density=(double) GetPixelRed(image,p)+GetPixelGreen(image,p)+ GetPixelBlue(image,p)+GetPixelBlack(image,p); if (density > total_ink_density) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageTotalInkDensity) #endif { if (density > total_ink_density) total_ink_density=density; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) total_ink_density=0.0; return(total_ink_density); }
GB_unop__isinf_bool_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isinf_bool_fc64) // op(A') function: GB (_unop_tran__isinf_bool_fc64) // C type: bool // A type: GxB_FC64_t // cast: GxB_FC64_t cij = (aij) // unaryop: cij = GB_cisinf (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cisinf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = (aij) ; \ Cx [pC] = GB_cisinf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isinf_bool_fc64) ( bool *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp 
parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = GB_cisinf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = GB_cisinf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isinf_bool_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/policy.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/registry.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. 
% % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringNotFalse(option) == MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); if (gamma != 0.0 && gamma != 1.0) { SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying 
layer opacity %.20g", (double) opacity);
  /* Fully opaque layer: nothing to scale. */
  if (opacity == QuantumRange)
    return(MagickTrue);
  /* Make sure an alpha channel exists before multiplying into it. */
  if (image->matte != MagickTrue)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
      else
        if (opacity > 0)
          /* revert: undo a previous ApplyPSDLayerOpacity by dividing. */
          SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
            (MagickRealType) opacity)));
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/*
  Multiply (or, with revert, divide) the image's alpha channel by the
  intensity of the layer's opacity mask.  The mask is first composited onto
  a clone of the image filled with `background' so that pixels outside the
  mask's page rectangle get the mask's background value.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  MagickPixelPacket
    color;

  ssize_t
    y;

  if (image->matte == MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  GetMagickPixelPacket(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color);
  /* Position the mask by the difference of the two page offsets. */
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
      else
        if (intensity > 0)
          SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
      q++;
      p++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}

/*
  Stash the layer's opacity mask image in the image registry under a random
  key and record that key as the "psd:opacity-mask" artifact so a later
  write can restore the mask.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    NOTE(review): GetRandomKey is asked for 2+1 bytes here, yet key[8] and
    key[9] are written below, and bytes 3..7 of the key are never set.
    This looks like a size mismatch (out-of-bounds and/or uninitialized key
    bytes unless StringInfo over-allocates) -- verify against the upstream
    ImageMagick fix history before relying on this key.
  */
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* Convert the mask's page offset back to canvas coordinates. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}

/*
  Decode PackBits-style RLE compacted pixels into `pixels'.  Returns the
  number of output bytes produced; both macros bail out early (returning the
  running count) when either the input or the output budget is exhausted.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* 128 is a no-op in PackBits */
    if (length > 128)
      {
        /* Run: one source byte repeated 257-length times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* 1-bit data: expand each bit to a full byte (0 or 255). */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              /* Any other depth: copy the byte unchanged. */
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal: length+1 bytes copied (and bit-expanded) verbatim. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}

/* Release every per-layer image, mask image, and info blob, then the array. */
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i<number_layers; i++)
  {
    if (layer_info[i].image != (Image *) NULL)
      layer_info[i].image=DestroyImage(layer_info[i].image);
    if (layer_info[i].mask.image != (Image *) NULL)
      layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
    if (layer_info[i].info != (StringInfo *) NULL)
      layer_info[i].info=DestroyStringInfo(layer_info[i].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}

/* Bytes per sample in the PSD stream: 1, 2, or 4 depending on depth/class. */
static inline size_t GetPSDPacketSize(const Image *image)
{
  if (image->storage_class == PseudoClass)
    {
      if (image->colors > 256)
        return(2);
    }
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}

/* Sizes are 4 bytes in PSD (version 1) and 8 bytes in PSB (version 2). */
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version == 1)
    return((MagickSizeType) ReadBlobLong(image));
  return((MagickSizeType) ReadBlobLongLong(image));
}

/* Bytes per stored row; 1-bit data is packed eight pixels per byte. */
static inline size_t GetPSDRowSize(Image *image)
{
  if (image->depth == 1)
    return(((image->columns+7)/8)*GetPSDPacketSize(image));
  else
    return(image->columns*GetPSDPacketSize(image));
}

/* Human-readable name of the PSD color mode, for logging. */
static const char *ModeToString(PSDImageType type)
{
  switch (type)
  {
    case BitmapMode: return "Bitmap";
    case GrayscaleMode: return "Grayscale";
    case IndexedMode: return "Indexed";
    case RGBMode: return "RGB";
    case CMYKMode: return "CMYK";
    case MultichannelMode: return "Multichannel";
    case DuotoneMode: return "Duotone";
    case LabMode: return "L*A*B";
    default: return "unknown";
  }
}

static StringInfo
*ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  /*
    Walk the "8BIM" image-resource blocks.  Resource 0x03ed supplies the
    x/y resolution; resource 0x0421 indicates whether a merged (composite)
    image is present.  The raw blocks are returned as an "8bim" profile.
    NOTE(review): the StringInfo returned by BlobToStringInfo is not checked
    for NULL before use -- confirm allocation failure handling upstream.
  */
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal name, padded to an even total (length byte + data). */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Reject blocks whose payload would run outside the buffer. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version-info resource: byte 4 zero means no merged image. */
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}

/*
  Map a 4-character PSD blend-mode key to the closest Magick composite
  operator; unknown keys fall back to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"norm",4) == 0)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"mul ",4) == 0)
    return(MultiplyCompositeOp);
  if (LocaleNCompare(mode,"diss",4) == 0)
    return(DissolveCompositeOp);
  if (LocaleNCompare(mode,"diff",4) == 0)
    return(DifferenceCompositeOp);
  if (LocaleNCompare(mode,"dark",4) == 0)
    return(DarkenCompositeOp);
  if (LocaleNCompare(mode,"lite",4) == 0)
    return(LightenCompositeOp);
  if (LocaleNCompare(mode,"hue ",4) == 0)
    return(HueCompositeOp);
  if (LocaleNCompare(mode,"sat ",4) == 0)
    return(SaturateCompositeOp);
  if (LocaleNCompare(mode,"colr",4) == 0)
    return(ColorizeCompositeOp);
  if (LocaleNCompare(mode,"lum ",4) == 0)
    return(LuminizeCompositeOp);
  if (LocaleNCompare(mode,"scrn",4) == 0)
    return(ScreenCompositeOp);
  if (LocaleNCompare(mode,"over",4) == 0)
    return(OverlayCompositeOp);
  if (LocaleNCompare(mode,"hLit",4) == 0)
    return(HardLightCompositeOp);
  if (LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if (LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}

/*
  Read `length' bytes into `p'.  When the blob's endian is not MSB the
  buffer is reversed in place (XOR swap) -- presumably to undo byte order
  on little-endian PSB variants; verify against callers.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      q=p+length;
      for(--q; p < q; ++p, --q)
      {
        *p = *p ^ *q,
        *q = *p ^ *q,
        *p = *p ^ *q;
      }
    }
  return(count);
}

static inline void
SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,PixelPacket *q,
  IndexPacket *indexes,ssize_t x)
{
  /*
    Store one decoded sample into pixel `x' of the current row.  `type' is
    the PSD channel id: 0/1/2/3 are color channels, -1 is transparency,
    and values < -1 are the user/layer mask channels.
  */
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;

      IndexPacket
        index;

      index=(IndexPacket) pixel;
      if (packet_size == 1)
        index=(IndexPacket) ScaleQuantumToChar(index);
      index=ConstrainColormapIndex(image,(ssize_t) index);
      if (type == 0)
        SetPixelIndex(indexes+x,index);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(indexes+x);
      if (type != 0)
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* Grayscale (or mask) data: replicate into green and blue. */
      if ((channels < 3) || (type == -2))
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha if present. */
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}

/*
  Convert one decoded row of raw channel bytes into the image's pixel
  cache.  Handles 8/16/32-bit samples; for 1-bit images each source byte
  is expanded into up to eight pixels here.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 32-bit channels are IEEE floats in [0,1]. */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
        q++;
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /* 1-bit image: unpack up to 8 pixels from this byte. */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* Compensate for the outer loop's x++ unless the row is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  Read an uncompressed channel: one blob read per row, then conversion
  into the pixel cache.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  Read the per-row compressed-byte-count table that precedes RLE channel
  data (2 bytes per row for PSD, 4 for PSB).  Returns NULL on allocation
  failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}

/*
  Read an RLE (PackBits) compressed channel.  `sizes' holds the compressed
  byte count for each row; the largest entry bounds the scratch buffer.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* A compressed row much larger than an uncompressed one is corrupt. */
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For depth 1 a bogus depth (123456) is passed so DecodePSDPixels takes
      its default byte-copy branch: the bit expansion is done later by
      ReadPSDChannelPixels, and doing it in both places would double-expand.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a ZIP-compressed channel: inflate the whole channel into one buffer,
  optionally undo the per-row delta prediction, then convert row by row.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  layer data is ZIP compressed");
  /* Sanity check before allocating: compressed data cannot exceed the blob. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt)
compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* Corrupt stream: free everything and report failure. */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the horizontal delta prediction: each sample is stored as the
        difference from its left neighbor (carry-propagated for 16-bit).
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

/*
  Read one channel of a layer.  Mask channels (type < -1) are decoded into
  a separate mask image unless they are skipped; all other channels go into
  the layer image.  The blob is repositioned to the end of the channel's
  data on exit regardless of how much the decoder consumed.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data (size includes the 2-byte compression). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          mask->matte=MagickFalse;
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always resynchronize to the end of this channel's data. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  /* Hand ownership of the decoded mask image to the layer. */
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}

/*
  Read all channels of one layer: set up the layer image (compose operator,
  colorspace, hidden psd:layer.* artifacts), decode each channel, then apply
  the layer opacity and, if present, the opacity mask.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else
    if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
        (psd_info->mode == GrayscaleMode))
      (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression selector. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* CMYK samples are stored inverted in PSD files. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /*
        Do not composite the mask when it is disabled
      */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}

/*
  Validate a layer's channel list against the mode's minimum channel set:
  every required color channel must appear exactly once (id 0..3), with
  transparency (-1) and mask (< -1) channels allowed in addition.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    /* Clear the bit for each required channel actually present. */
    if (type == 0)
      channel_type&=~RedChannel;
    else
      if (type == 1)
        channel_type&=~GreenChannel;
      else
        if (type == 2)
          channel_type&=~BlueChannel;
        else
          if (type == 3)
            channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel. So we enable it when we think we should.
  */
  if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) ||
      ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) ||
      ((psd_info->mode == CMYKMode) && (psd_info->channels > 4)))
    image->matte=MagickTrue;
}

/*
  Return the size of the layer-info section.  When the plain size field is
  zero, probe for an additional-information section: a "Mtrn"/"Mt16"/"Mt32"
  key flags merged-image transparency, and "Lr16"/"Lr32" wraps the real
  16/32-bit layer data whose size is then returned.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);  /* discarded length field */
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->matte=MagickTrue;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}

/*
  Parse the layer-info section: layer count, per-layer records, then the
  channel data.  (Definition continues beyond this chunk.)
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(ssize_t) ReadBlobSignedShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " 
offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
      /*
        Read the per-channel descriptors (type id and stored byte count)
        that precede the layer record.
      */
      for (j=0; j < (ssize_t) layer_info[i].channels; j++)
      {
        layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
        /* Channel ids outside [-4,4] indicate a corrupt file. */
        if ((layer_info[i].channel_info[j].type < -4) ||
            (layer_info[i].channel_info[j].type > 4))
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
              image->filename);
          }
        layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
          image);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
            (double) layer_info[i].channel_info[j].type,
            (double) layer_info[i].channel_info[j].size);
      }
      if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
            image->filename);
        }
      /* The blend-mode record must carry an "8BIM" signature. */
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer type was %.4s instead of 8BIM", type);
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
            image->filename);
        }
      count=ReadPSDString(image,layer_info[i].blendkey,4);
      if (count != 4)
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
            image->filename);
        }
      layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
        ReadBlobByte(image));
      layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
      layer_info[i].flags=(unsigned char) ReadBlobByte(image);
      /* Flag bit 0x02 set means the layer is hidden. */
      layer_info[i].visible=!(layer_info[i].flags & 0x02);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
          layer_info[i].blendkey,(double) layer_info[i].opacity,
          layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
          layer_info[i].visible ? "true" : "false");
      (void) ReadBlobByte(image);  /* filler */
      size=ReadBlobLong(image);
      if (size != 0)
        {
          MagickSizeType
            combined_length,
            length;

          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer contains additional info");
          length=ReadBlobLong(image);
          combined_length=length+4;
          if (length != 0)
            {
              /*
                Layer mask info.
              */
              layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
              layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
              layer_info[i].mask.page.height=(size_t) (
                ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
              layer_info[i].mask.page.width=(size_t) (
                ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
              layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                image);
              layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
              /*
                Unless flag bit 0x01 is set, make the mask offset relative
                to the layer's own page offset.
              */
              if (!(layer_info[i].mask.flags & 0x01))
                {
                  layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                    layer_info[i].page.y;
                  layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                    layer_info[i].page.x;
                }
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                  (double) layer_info[i].mask.page.x,(double)
                  layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                  (double) layer_info[i].mask.page.height,(double)
                  ((MagickOffsetType) length)-18);
              /*
                Skip over the rest of the layer mask information.
                NOTE(review): 18 bytes were consumed above; if length < 18
                the unsigned subtraction below wraps around -- confirm the
                length is validated upstream.
              */
              if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
                    image->filename);
                }
            }
          length=ReadBlobLong(image);
          combined_length+=length+4;
          if (length != 0)
            {
              /*
                Layer blending ranges info.
              */
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  " layer blending ranges: length=%.20g",(double)
                  ((MagickOffsetType) length));
              if (DiscardBlobBytes(image,length) == MagickFalse)
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,
                    "UnexpectedEndOfFile",image->filename);
                }
            }
          /*
            Layer name: a Pascal-style string (length byte then data).
          */
          length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
          combined_length+=length+1;
          if (length > 0)
            (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
          layer_info[i].name[length]='\0';
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer name: %s",layer_info[i].name);
          /* The name (with its length byte) is padded to a multiple of 4. */
          if ((length % 4) != 0)
            {
              length=4-(length % 4);
              combined_length+=length;
              /* Skip over the padding of the layer name */
              if (DiscardBlobBytes(image,length) == MagickFalse)
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,
                    "UnexpectedEndOfFile",image->filename);
                }
            }
          /*
            Whatever remains of the extra-data block is preserved verbatim
            and later attached as the "psd:additional-info" profile.
          */
          length=(MagickSizeType) size-combined_length;
          if (length > 0)
            {
              unsigned char
                *info;

              if (length > GetBlobSize(image))
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,
                    "InsufficientImageDataInFile",image->filename);
                }
              layer_info[i].info=AcquireStringInfo((const size_t) length);
              info=GetStringInfoDatum(layer_info[i].info);
              (void) ReadBlob(image,(const size_t) length,info);
            }
        }
    }
    for (i=0; i < number_layers; i++)
    {
      /* Layers with a zero-area page carry no pixel data. */
      if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer data is empty");
          if (layer_info[i].info != (StringInfo *) NULL)
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          continue;
        }
      /*
        Allocate layered image.
      */
      layer_info[i].image=CloneImage(image,layer_info[i].page.width,
        layer_info[i].page.height,MagickFalse,exception);
      if (layer_info[i].image == (Image *) NULL)
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of image for layer %.20g failed",(double) i);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      if (layer_info[i].info != (StringInfo *) NULL)
        {
          (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
            layer_info[i].info);
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        }
    }
    if (image_info->ping == MagickFalse)
      {
        for (i=0; i < number_layers; i++)
        {
          if (layer_info[i].image == (Image *) NULL)
            {
              /*
                No image was allocated for this layer: skip over its
                stored channel data so the blob stays in sync.
              */
              for (j=0; j < (ssize_t) layer_info[i].channels; j++)
              {
                if (DiscardBlobBytes(image,(MagickSizeType)
                    layer_info[i].channel_info[j].size) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
              continue;
            }
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " reading data for layer %.20g",(double) i);
          status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
            exception);
          if (status == MagickFalse)
            break;
          status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
            (MagickSizeType) number_layers);
          if (status == MagickFalse)
            break;
        }
      }
    if (status != MagickFalse)
      {
        /*
          Compact the array: shift out entries whose image could not be
          read.
        */
        for (i=0; i < number_layers; i++)
        {
          if (layer_info[i].image == (Image *) NULL)
            {
              for (j=i; j < number_layers - 1; j++)
                layer_info[j] = layer_info[j+1];
              number_layers--;
              i--;
            }
        }
        if (number_layers > 0)
          {
            /*
              Link the layer images into the image list behind the base
              image and propagate each layer's page geometry.
            */
            for (i=0; i < number_layers; i++)
            {
              if (i > 0)
                layer_info[i].image->previous=layer_info[i-1].image;
              if (i < (number_layers-1))
                layer_info[i].image->next=layer_info[i+1].image;
              layer_info[i].image->page=layer_info[i].page;
            }
            image->next=layer_info[0].image;
            layer_info[0].image->previous=image;
          }
        layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      }
    else
      layer_info=DestroyLayerInfo(layer_info,number_layers);
  }

  return(status);
}

/*
  Public entry point for reading PSD layers: checks the coder security
  policy for "PSD" read rights before delegating to
  ReadPSDLayersInternal().
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}

/*
  Read the precombined (merged) image data: a 2-byte compression marker
  followed by all channels stored planar, either raw or RLE-packed.
  CMYK data is negated after decoding.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  /* Only Raw and RLE are supported for the merged image. */
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one packed-row byte count per row. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* In a two-channel image the second channel is the alpha (-1). */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

/*
  Read a PSD/PSB image: validates the file header, then reads (in order)
  the colormap section, the image resource blocks, the layer and mask
  block, and finally the precombined (merged) layer.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* Signature must be "8BPS"; version 1 is PSD, version 2 is PSB. */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version 1 (PSD) caps dimensions at 30000 pixels per side. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  /* Choose the colorspace and minimum channel count from the PSD mode. */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,MagickMin((size_t)
              (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize));
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace);
      }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data;  the format of this data is undocumented.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* Colormap is stored planar: all reds, then greens, then blues. */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->matte=MagickFalse;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  /* When only the first scene is wanted and a merged image exists, the
     individual layers need not be decoded. */
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
            exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if (has_merged_image != MagickFalse || imageListLength == 1)
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* No merged image and no layers read yet: retry the layer block. */
  if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* Synthesize the missing merged image by flattening the layers. */
      image->background_color.opacity=TransparentOpacity;
      (void) SetImageBackgroundColor(image);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every image in the list. */
      next=image;
      while (next != (Image *) NULL)
      {
        (void) SetImageProfile(next,GetStringInfoName(profile),profile);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* "PSB" is the large-document variant; both share this coder module. */
  entry=SetMagickInfo("PSB");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Large Document Format");
  entry->magick_module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("PSD");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Photoshop bitmap");
  entry->magick_module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/

/*
  Write an offset field: 16-bit for version 1 (PSD), 32-bit for
  version 2 (PSB).
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/*
  Patch a previously reserved offset field at `offset', then restore the
  current blob position.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  Write a size field: 32-bit for version 1 (PSD), 64-bit for version 2
  (PSB).
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

/*
  Patch a previously reserved size field at `offset', then restore the
  current blob position.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
    result=WriteBlobMSBLongLong(image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  Compress one row of pixels with PackBits run-length encoding into
  compact_pixels and return the number of bytes written.  The caller
  provides a compact_pixels buffer large enough for worst-case expansion
  (see AcquireCompactPixels()).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* A single trailing byte is emitted as a literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  Write the 2-byte compression marker and, for RLE, a placeholder
  byte-count entry per row per channel (patched later via
  WritePSDOffset()).  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  ssize_t
    i,
    offset,
    y;

  if (next_image->compression == RLECompression)
    {
      offset=WriteBlobMSBShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          offset+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (next_image->compression == ZipCompression)
    offset=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
  else
    offset=WriteBlobMSBShort(image,Raw);
  return((size_t) offset);
}

/*
  Write a single channel of next_image (selected by quantum_type) as raw,
  RLE-packed or, when zlib is available, deflate-compressed data.
  size_offset locates the reserved size field to patch; `separate' marks a
  per-layer channel (which writes its own compression marker).  Returns
  the number of bytes written.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Each separate channel starts with its own compression marker. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality values 1-9 map directly to the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    /* 1-bit data is stored inverted. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* Record this row's packed byte count in the size table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do
        {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  Allocate a scratch buffer sized for worst-case PackBits expansion of
  one row; returns NULL (with an exception recorded on the image) on
  allocation failure.
*/
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        image->filename);
    }
  return(compact_pixels);
}

/*
  Write all channels of next_image: indexed, gray, or RGB/CMYK plus an
  optional alpha channel, followed (for separate per-layer data) by an
  optional opacity mask registered under "psd:opacity-mask".  Returns the
  number of bytes written.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    length,
    offset_length;

  ssize_t
    count;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /*
        Composite data: a single compression marker covers all channels,
        so count them first.
      */
      if ((next_image->storage_class != PseudoClass) ||
          (IsGrayImage(next_image,&next_image->exception) != MagickFalse))
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->matte != MagickFalse)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
        channels);
      /* Size of one channel's RLE row-count table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsGrayImage(next_image,&next_image->exception) == MagickFalse))
    {
      /* Colormapped image: write the single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK data is stored negated; restored again further below. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied before writing. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateImage(next_image,MagickFalse);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /*
        An opacity mask stored in the image registry under the
        "psd:opacity-mask" artifact is written as an extra channel.
      */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            &image->exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}

/*
  Write `value' as a Pascal-style string (length byte then data,
  truncated to 255 characters) padded with zero bytes to a multiple of
  `padding'.  Returns the number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ?
    255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  /* Account for the length byte when computing the padding. */
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  Write the "8BIM" resolution resource (id 0x03ED): horizontal and
  vertical resolution as 16.16 fixed-point values plus their unit codes
  (1 = per inch, 2 = per centimeter here).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->x_resolution+0.5;
      y_resolution=2.54*65536.0*image->y_resolution+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->x_resolution+0.5;
      y_resolution=65536.0*image->y_resolution+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/*
  Write a channel id followed by a zero size placeholder (patched later
  via WritePSDSize()).  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  ssize_t
    count;

  count=WriteBlobMSBSignedShort(image,channel);
  count+=SetPSDSize(psd_info,image,0);
  return((size_t) count);
}

/*
  Remove the ICC profile resource (id 0x040F) from an "8BIM" resource
  block, shrinking the StringInfo in place.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk the resource entries; each starts with an "8BIM" signature. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* 12 header bytes plus the padded resource payload. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  Remove the resolution resource (id 0x03ED) from an "8BIM" resource
  block, shrinking the StringInfo in place.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  Return the "psd:additional-info" profile to be written back out,
  honoring the "psd:additional-info" image option: "all" keeps the whole
  profile, "selective" keeps only whitelisted keys, anything else drops
  the profile entirely.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] =
    {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each entry: 4-byte signature, 4-byte key, 4-byte size, then data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Non-whitelisted entry: splice it out of the buffer. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info);
  return(profile);
}

static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image)
{
  char
    layer_name[MaxTextExtent];

  const char
    *property;

  const StringInfo
    *icc_profile,
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  PSDInfo
psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,&image->exception) != MagickFalse)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 
5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); (void) SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for 
(next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(unsigned char) (next_image->compose == NoCompositeOp ? 
1 << 0x02 : 1)); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(unsigned char) ( mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0, MagickFalse) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
GB_binop__first_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): this file is the FIRST operator specialized for fp32,
// z = f(x,y) = x.  The second operand (y / B) is never read, which is why
// several macros below (GB_GETB, the y-side of GB_BINOP) are intentionally
// empty or ignore their arguments.  The macros defined here are consumed by
// the #include'd template files; their names and token content are a fixed
// contract with those templates.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__first_fp32
// A.*B function (eWiseMult):       GB_AemultB__first_fp32
// A*D function (colscale):         GB_AxD__first_fp32
// D*A function (rowscale):         GB_DxB__first_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__first_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__first_fp32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__first_fp32
// C=scalar+B                       GB_bind1st__first_fp32
// C=scalar+B'                      GB_bind1st_tran__first_fp32
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = aij

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
// (intentionally a bare ';': FIRST never reads its second operand)
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: z = first (x, y) = x ; y, i, j are ignored
#define GB_BINOP(z, x, y, i, j) \
    z = x ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FP32 || GxB_NO_FIRST_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for FIRST: the dense ewise3-accum kernel only exists for the
// arithmetic ops listed below, so this stub is compiled out with #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this kernel is compile-time disabled (GB_DISABLE),
// which tells the caller to fall back to the generic (non-specialized) code.
GrB_Info GB_Cdense_ewise3_noaccum__first_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The kernel body is compiled out (#if 0) for FIRST; the function is kept so
// the generated dispatch table stays uniform across operators.
GrB_Info GB_Cdense_accumB__first_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// As above: stub kept for dispatch-table uniformity, body compiled out.
GrB_Info GB_Cdense_accumb__first_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__first_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the output array; the template writes into it via GB_CX (p)
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__first_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// GB_FREE_ALL releases the per-matrix ek_slice workspaces; it is invoked by
// the add/emult templates on both the success and error paths.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                 \
{                                                                   \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__first_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers, allocated (if needed) inside the template and
    // released by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__first_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__first_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float  x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Bb may be NULL: GBB)
        if (!GBB (Bb, p)) continue ;
        // the stray ';' statements below are the expansion of the empty
        // GB_GETB for the FIRST op, which never reads Bx
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Not generated for FIRST (bind2nd of FIRST is just a typed copy); stub only.
#if 0
GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float  y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = Ax [p] ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = x ;          \
}

GrB_Info GB_bind1st_tran__first_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its file-level definition
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// Not generated for FIRST; stub only.
#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    float aij = Ax [pA] ;       \
    Cx [pC] = aij ;             \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
kpoint.c
/* Copyright (C) 2008 Atsushi Togo */ /* All rights reserved. */ /* This file is part of spglib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include "mathfunc.h" #include "kpoint.h" #include "kgrid.h" #ifdef KPTWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif #define KPT_NUM_BZ_SEARCH_SPACE 125 static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1}, { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1}, { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1}, { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1}, { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1}, { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1}, { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1}, { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1}, { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1}, { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1}, { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1}, { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1}, { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1}, { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1}, { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static MatINT *get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal); static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]); static int get_ir_reciprocal_mesh(int 
grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal); static long get_long_ir_reciprocal_mesh(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static int get_ir_reciprocal_mesh_normal(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static long get_long_ir_reciprocal_mesh_normal(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static int get_ir_reciprocal_mesh_distortion(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static long get_long_ir_reciprocal_mesh_distortion(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); static int get_num_ir(int ir_mapping_table[], const int mesh[3]); static long get_long_num_ir(long ir_mapping_table[], const int mesh[3]); static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]); static long relocate_long_BZ_grid_address(int bz_grid_address[][3], long bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]); static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]); static int check_mesh_symmetry(const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal); /* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */ /* [[ 0 0 0] */ /* [ 1 0 0] */ /* [ 2 0 0] */ /* [-1 0 0] */ /* [ 0 1 0] */ /* [ 1 1 0] */ /* [ 2 1 0] */ /* [-1 1 0] */ /* .... ] */ /* */ /* Each value of 'map' correspnds to the index of grid_point. 
*/ int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int num_ir; num_ir = get_ir_reciprocal_mesh(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); return num_ir; } long kpt_get_long_irreducible_reciprocal_mesh(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { long num_ir; num_ir = get_long_ir_reciprocal_mesh(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); return num_ir; } int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const MatINT * rotations, const int num_q, SPGCONST double qpoints[][3]) { int num_ir; MatINT *rot_reciprocal, *rot_reciprocal_q; double tolerance; rot_reciprocal = NULL; rot_reciprocal_q = NULL; rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal); tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, num_q, qpoints); num_ir = get_ir_reciprocal_mesh(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return num_ir; } long kpt_get_long_stabilized_reciprocal_mesh(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const MatINT * rotations, const int num_q, SPGCONST double qpoints[][3]) { long num_ir; MatINT *rot_reciprocal, *rot_reciprocal_q; double tolerance; rot_reciprocal = NULL; rot_reciprocal_q = NULL; rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal); tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, num_q, qpoints); num_ir = 
get_long_ir_reciprocal_mesh(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return num_ir; } void kpt_get_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; int address_double_orig[3], address_double[3]; for (i = 0; i < 3; i++) { address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh); } } void kpt_get_long_grid_points_by_rotations(long rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; int address_double_orig[3], address_double[3]; for (i = 0; i < 3; i++) { address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = kgd_get_long_grid_point_double_mesh(address_double, mesh); } } void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3], const int bz_map[]) { int i; int address_double_orig[3], address_double[3], bzmesh[3]; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = bz_map[kgd_get_grid_point_double_mesh(address_double, bzmesh)]; } } void kpt_get_long_BZ_grid_points_by_rotations(long rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, 
const int mesh[3], const int is_shift[3], const long bz_map[]) { int i; int address_double_orig[3], address_double[3], bzmesh[3]; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = bz_map[kgd_get_long_grid_point_double_mesh(address_double, bzmesh)]; } } int kpt_relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { return relocate_BZ_grid_address(bz_grid_address, bz_map, grid_address, mesh, rec_lattice, is_shift); } long kpt_relocate_long_BZ_grid_address(int bz_grid_address[][3], long bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { return relocate_long_BZ_grid_address(bz_grid_address, bz_map, grid_address, mesh, rec_lattice, is_shift); } MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { return get_point_group_reciprocal(rotations, is_time_reversal); } MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { return get_point_group_reciprocal_with_q(rot_reciprocal, symprec, num_q, qpoints); } /* Return NULL if failed */ static MatINT *get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { int i, j, num_rot; MatINT *rot_reciprocal, *rot_return; int *unique_rot; SPGCONST int inversion[3][3] = { {-1, 0, 0 }, { 0,-1, 0 }, { 0, 0,-1 } }; rot_reciprocal = NULL; rot_return = NULL; unique_rot = NULL; if (is_time_reversal) { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) { return NULL; } } else { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) { return NULL; } } if 
((unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { unique_rot[i] = -1; } for (i = 0; i < rotations->size; i++) { mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]); if (is_time_reversal) { mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i], inversion, rot_reciprocal->mat[i]); } } num_rot = 0; for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_rot; j++) { if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]], rot_reciprocal->mat[i])) { goto escape; } } unique_rot[num_rot] = i; num_rot++; escape: ; } if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]); } } free(unique_rot); unique_rot = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return rot_return; } /* Return NULL if failed */ static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { int i, j, k, l, is_all_ok, num_rot; int *ir_rot; double q_rot[3], diff[3]; MatINT * rot_reciprocal_q; ir_rot = NULL; rot_reciprocal_q = NULL; is_all_ok = 0; num_rot = 0; if ((ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of ir_rot could not be allocated."); return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { ir_rot[i] = -1; } for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_q; j++) { is_all_ok = 0; mat_multiply_matrix_vector_id3(q_rot, rot_reciprocal->mat[i], qpoints[j]); for (k = 0; k < num_q; k++) { for (l = 0; l < 3; l++) { diff[l] = q_rot[l] - qpoints[k][l]; diff[l] -= mat_Nint(diff[l]); } if (mat_Dabs(diff[0]) < symprec && mat_Dabs(diff[1]) < symprec && mat_Dabs(diff[2]) < 
symprec) { is_all_ok = 1; break; } } if (! is_all_ok) { break; } } if (is_all_ok) { ir_rot[num_rot] = i; num_rot++; } } if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_reciprocal_q->mat[i], rot_reciprocal->mat[ir_rot[i]]); } } free(ir_rot); ir_rot = NULL; return rot_reciprocal_q; } static int get_ir_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { if (check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) { return get_ir_reciprocal_mesh_normal(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); } else { return get_ir_reciprocal_mesh_distortion(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); } } static long get_long_ir_reciprocal_mesh(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { if (check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) { return get_long_ir_reciprocal_mesh_normal(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); } else { return get_long_ir_reciprocal_mesh_distortion(grid_address, ir_mapping_table, mesh, is_shift, rot_reciprocal); } } static int get_ir_reciprocal_mesh_normal(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { /* In the following loop, mesh is doubled. */ /* Even and odd mesh numbers correspond to */ /* is_shift[i] are 0 or 1, respectively. */ /* is_shift = [0,0,0] gives Gamma center mesh. */ /* grid: reducible grid points */ /* ir_mapping_table: the mapping from each point to ir-point. 
*/ int i, j, grid_point_rot; int address_double[3], address_double_rot[3]; kgd_get_all_grid_addresses(grid_address, mesh); //#pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } return get_num_ir(ir_mapping_table, mesh); } static long get_long_ir_reciprocal_mesh_normal(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { /* In the following loop, mesh is doubled. */ /* Even and odd mesh numbers correspond to */ /* is_shift[i] are 0 or 1, respectively. */ /* is_shift = [0,0,0] gives Gamma center mesh. */ /* grid: reducible grid points */ /* ir_mapping_table: the mapping from each point to ir-point. 
*/ int j; long i, grid_point_rot; int address_double[3], address_double_rot[3]; kgd_get_all_grid_addresses(grid_address, mesh); //#pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); grid_point_rot = kgd_get_long_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } return get_long_num_ir(ir_mapping_table, mesh); } static int get_ir_reciprocal_mesh_distortion(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int i, j, k, grid_point_rot, indivisible; int address_double[3], address_double_rot[3], divisor[3]; kgd_get_all_grid_addresses(grid_address, mesh); for (i = 0; i < 3; i++) { divisor[i] = mesh[(i + 1) % 3] * mesh[(i + 2) % 3]; } //#pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); for (j = 0; j < 3; j++) { address_double[j] *= divisor[j]; } ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); for (k = 0; k < 3; k++) { indivisible = address_double_rot[k] % divisor[k]; if (indivisible) {break;} address_double_rot[k] /= divisor[k]; if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) || (address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) { indivisible = 1; break; } } if (indivisible) {continue;} grid_point_rot = 
kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } return get_num_ir(ir_mapping_table, mesh); } static long get_long_ir_reciprocal_mesh_distortion(int grid_address[][3], long ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int j, k, indivisible; long i, grid_point_rot; int address_double[3], address_double_rot[3], divisor[3]; kgd_get_all_grid_addresses(grid_address, mesh); for (j = 0; j < 3; j++) { divisor[j] = mesh[(j + 1) % 3] * mesh[(j + 2) % 3]; } //#pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); for (j = 0; j < 3; j++) { address_double[j] *= divisor[j]; } ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); for (k = 0; k < 3; k++) { indivisible = address_double_rot[k] % divisor[k]; if (indivisible) {break;} address_double_rot[k] /= divisor[k]; if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) || (address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) { indivisible = 1; break; } } if (indivisible) {continue;} grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } return get_long_num_ir(ir_mapping_table, mesh); } static int get_num_ir(int ir_mapping_table[], const int mesh[3]) { int i, num_ir; num_ir = 0; //#pragma omp parallel for reduction(+:num_ir) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { if (ir_mapping_table[i] == i) { num_ir++; } } #ifdef 
_OPENMP for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]]; } #endif return num_ir; } static long get_long_num_ir(long ir_mapping_table[], const int mesh[3]) { long i, num_ir; num_ir = 0; //#pragma omp parallel for reduction(+:num_ir) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { if (ir_mapping_table[i] == i) { num_ir++; } } #ifdef _OPENMP for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]]; } #endif return num_ir; } /* Relocate grid addresses to first Brillouin zone */ /* bz_grid_address[prod(mesh + 1)][3] */ /* bz_map[prod(mesh * 2)] */ static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { double tolerance, min_distance; double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE]; int bzmesh[3], bz_address_double[3]; int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp; tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh); for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) { bz_map[i] = -1; } boundary_num_gp = 0; total_num_gp = mesh[0] * mesh[1] * mesh[2]; /* Multithreading doesn't work for this loop since gp calculated */ /* with boundary_num_gp is unstable to store bz_grid_address. 
*/ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static long relocate_long_BZ_grid_address(int bz_grid_address[][3], long bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { double tolerance, min_distance; double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE]; int bzmesh[3], bz_address_double[3]; long i, boundary_num_gp, total_num_gp, bzgp, gp; int j, k, min_index; tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh); for (j = 0; j < 3; j++) { bzmesh[j] = mesh[j] * 2; } for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) { bz_map[i] = -1; } boundary_num_gp = 0; total_num_gp = mesh[0] * mesh[1] * mesh[2]; /* Multithreading doesn't work for this loop since gp calculated */ /* with boundary_num_gp is unstable to store bz_grid_address. 
*/ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_long_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]) { int i, j; double tolerance; double length[3]; for (i = 0; i < 3; i++) { length[i] = 0; for (j = 0; j < 3; j++) { length[i] += rec_lattice[j][i] * rec_lattice[j][i]; } length[i] /= mesh[i] * mesh[i]; } tolerance = length[0]; for (i = 1; i < 3; i++) { if (tolerance < length[i]) { tolerance = length[i]; } } tolerance *= 0.01; return tolerance; } static int check_mesh_symmetry(const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int i; int eq[3]; eq[0] = 0; /* a=b */ eq[1] = 0; /* b=c */ eq[2] = 0; /* c=a */ for (i = 0; i < rot_reciprocal->size; i++) { if (rot_reciprocal->mat[i][0][0] == 0 && rot_reciprocal->mat[i][1][0] == 1 && rot_reciprocal->mat[i][2][0] == 0) {eq[0] = 1;} if (rot_reciprocal->mat[i][0][0] == 0 && rot_reciprocal->mat[i][1][0] == 0 && rot_reciprocal->mat[i][2][0] == 1) {eq[2] = 1;} if (rot_reciprocal->mat[i][0][1] 
== 0 && rot_reciprocal->mat[i][1][1] == 0 && rot_reciprocal->mat[i][2][1] == 1) {eq[1] = 1;} } return (((eq[0] && mesh[0] == mesh[1] && is_shift[0] == is_shift[1]) || (!eq[0])) && ((eq[1] && mesh[1] == mesh[2] && is_shift[1] == is_shift[2]) || (!eq[1])) && ((eq[2] && mesh[2] == mesh[0] && is_shift[2] == is_shift[0]) || (!eq[2]))); }
core_ssyrk.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsyrk.c, normal z -> s, Fri Sep 28 17:38:23 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_syrk
 *
 *  Performs one of the symmetric rank k operations
 *
 *    \f[ C = \alpha A \times A^T + \beta C, \f]
 *    or
 *    \f[ C = \alpha A^T \times A + \beta C, \f]
 *
 *  where alpha and beta are scalars, C is an n-by-n symmetric
 *  matrix, and A is an n-by-k matrix in the first case and a k-by-n
 *  matrix in the second case.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
 *          - PlasmaTrans:   \f[ C = \alpha A^T \times A + \beta C. \f]
 *
 * @param[in] n
 *          The order of the matrix C. n >= 0.
 *
 * @param[in] k
 *          If trans = PlasmaNoTrans, number of columns of the A matrix;
 *          if trans = PlasmaTrans, number of rows of the A matrix.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          A is an lda-by-ka matrix.
 *          If trans = PlasmaNoTrans, ka = k;
 *          if trans = PlasmaTrans,   ka = n.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          If trans = PlasmaNoTrans, lda >= max(1, n);
 *          if trans = PlasmaTrans,   lda >= max(1, k).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          C is an ldc-by-n matrix.
 *          On exit, the uplo part of the matrix is overwritten
 *          by the uplo part of the updated matrix.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1, n).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_ssyrk(plasma_enum_t uplo, plasma_enum_t trans,
                       int n, int k,
                       float alpha, const float *A, int lda,
                       float beta,        float *C, int ldc)
{
    cblas_ssyrk(CblasColMajor,
                (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
                n, k,
                (alpha), A, lda,
                (beta), C, ldc);
}

/******************************************************************************/
void plasma_core_omp_ssyrk(
    plasma_enum_t uplo, plasma_enum_t trans,
    int n, int k,
    float alpha, const float *A, int lda,
    float beta,        float *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Number of columns of A: k when trans = PlasmaNoTrans (A is lda-by-k),
    // n when trans = PlasmaTrans (A is lda-by-n).
    int ak;
    if (trans == PlasmaNoTrans)
        ak = k;
    else
        ak = n;

    if (sequence->status == PlasmaSuccess) {
        // Bug fix: the depend/map extents previously used lda*k, which
        // under-maps A (and declares a wrong dependence region) when
        // trans = PlasmaTrans; use the trans-aware column count ak.
        #pragma omp target nowait \
            depend(in:A[0:lda*ak]) \
            depend(inout:C[0:ldc*n]) \
            map(to:A[:lda*ak]) \
            map(tofrom:C[:ldc*n]) \
            firstprivate(alpha, beta, uplo, trans, n, k, ldc, lda)
        {
            plasma_core_ssyrk(uplo, trans,
                              n, k,
                              alpha, A, lda,
                              beta,  C, ldc);
        }
    }
}
GB_binop__pair_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_fp64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: double // A type: double // B,b type: double // BinaryOp: cij = 1 #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as 
C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include 
"GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool 
Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). 
if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be 
aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif